#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import argparse
import subprocess
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler

def setup_device(device_pref):
    """
    Set up the training device based on the specified preference.

    Args:
        device_pref (str): Device preference, e.g., "auto", "cuda", "cpu".

    Returns:
        torch.device: The selected device for training.
    """
    import torch
    print(f"\n==============================")
    print('🟧\tPisces L1 Training Start!')
    if device_pref == "auto":
        # Automatically select CUDA if available, otherwise use CPU
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    else:
        # Use the specified device
        device = torch.device(device_pref)
    print(f"✅\tUsing device: {device}")
    # Only report GPU details when the *selected* device is CUDA; previously
    # GPU info was printed whenever CUDA existed, even when training on CPU.
    if device.type == "cuda" and torch.cuda.is_available():
        # Report the actual device index in use (defaults to 0)
        idx = device.index if device.index is not None else 0
        print(f"✅\tGPU: {torch.cuda.get_device_name(idx)}")
        print(f"✅\tGPU Memory: {torch.cuda.get_device_properties(idx).total_memory / 1024**3:.1f} GB")
    else:
        print("❌\tNo GPU available, using CPU")
    return device

def collate_fn(batch):
    """
    Collate a batch of data items into padded tensors for training.

    Args:
        batch (list): List of dicts, each containing an "input_ids" tensor
            and a "correct" integer label.

    Returns:
        dict: Collated batch with keys "input_ids", "labels", "pixel_values",
            "audio_input" and "correct". Vision/audio entries are None
            placeholders; "labels" is a copy of the padded/truncated inputs
            for causal-LM training.
    """
    import torch
    # Hard cap on sequence length to bound memory usage
    MAX_SEQ_LEN = 256
    # Right-pad all sequences in the batch to a common length with 0
    input_ids = torch.nn.utils.rnn.pad_sequence(
        [item["input_ids"] for item in batch], batch_first=True, padding_value=0
    )
    # Truncate if the padded batch exceeds the maximum sequence length
    if input_ids.shape[1] > MAX_SEQ_LEN:
        input_ids = input_ids[:, :MAX_SEQ_LEN]
    # Labels mirror the inputs (causal LM). Cloning happens after truncation,
    # so the original's second truncation check was dead code and is removed.
    # NOTE(review): padding positions carry label 0 rather than an ignore
    # index — confirm the model's loss masks padding tokens.
    labels = input_ids.clone()
    # Per-sample reasoning-correctness labels
    correct = torch.tensor([item["correct"] for item in batch], dtype=torch.long)
    return {
        "input_ids": input_ids,
        "labels": labels,
        # Placeholders for multimodal inputs (unused by this collator)
        "pixel_values": None,
        "audio_input": None,
        "correct": correct,
    }

def train(args):
    """
    Train the Pisces model based on the provided arguments.

    Dataset names are read from ``data_cache/model.txt`` (one per line,
    '#' lines ignored) and trained sequentially. Each dataset runs epoch
    after epoch until its average loss drops below 1.0, or until the user
    interrupts with Ctrl-C (which saves a resumable checkpoint).

    Args:
        args (argparse.Namespace): Command line arguments for training
            (model_size, resume_ckpt, distributed, local_rank, ...).
    """
    import torch
    from data.dataset import PiscesDataset
    from torch.utils.data import DataLoader
    from model import PiscesModel, PiscesConfig
    from trainer.checkpoint import save_ckpt, load_ckpt
    from torch.optim.lr_scheduler import ReduceLROnPlateau
    from model.tokenizer import get_tokenizer

    # Automatic configuration for different model sizes
    AUTO_CONFIG = {
        "0.5B":  dict(batch_size=4,  accum=8,  seq_len=384,  force_quant=True, force_lora=False, lr=3e-5),
        "1.5B":  dict(batch_size=2,  accum=16, seq_len=512,  force_quant=True, force_lora=False, lr=2e-5),
        "7B":    dict(batch_size=1,  accum=32, seq_len=384,  force_quant=True, force_lora=True,  lr=2e-5),
        "32B":   dict(batch_size=1,  accum=64, seq_len=256,  force_quant=True, force_lora=True,  lr=1e-5),
        "64B":   dict(batch_size=1,  accum=64, seq_len=192,  force_quant=True, force_lora=True,  lr=1e-5),
        "70B":   dict(batch_size=1,  accum=64, seq_len=128,  force_quant=True, force_lora=True,  lr=8e-6),
    }
    # Get the model size from arguments, default to "0.5B"
    model_size = getattr(args, 'model_size', '0.5B').upper()
    if model_size not in AUTO_CONFIG:
        print(f"❌ Unsupported model_size: {model_size}")
        sys.exit(1)
    # Unpack the per-size training configuration
    cfg_dict = AUTO_CONFIG[model_size]
    batch_size = cfg_dict['batch_size']
    accum = cfg_dict['accum']
    seq_len = cfg_dict['seq_len']  # NOTE(review): currently unused here
    force_quant = cfg_dict['force_quant']
    force_lora = cfg_dict['force_lora']
    epochs = 1
    lr = cfg_dict['lr']
    save_dir = "ckpt"

    # Minimum epoch number before enabling the ReduceLROnPlateau scheduler
    min_plateau_epoch = 5
    scheduler = None

    data_cache_dir = "data_cache"
    model_txt = os.path.join(data_cache_dir, "model.txt")
    if not os.path.exists(model_txt):
        print(f"❌\t{model_txt} not found! Please create it with one dataset name per line.")
        sys.exit(1)
    # Read dataset names from the file, skipping blanks and '#' comment lines
    with open(model_txt, "r", encoding="utf-8") as f:
        dataset_list = [line.strip() for line in f if line.strip() and not line.strip().startswith('#')]
    if not dataset_list:
        print(f"❌\tNo dataset names found in {model_txt}!")
        sys.exit(1)
    if args.distributed:
        # Initialize the distributed training process group
        dist.init_process_group(backend='nccl')
        local_rank = args.local_rank
        torch.cuda.set_device(local_rank)
        device = torch.device('cuda', local_rank)
        print(f"✅\tUsing distributed training. Local rank: {local_rank}")
    else:
        # Set up the device automatically
        device = setup_device("auto")
    print(f"✅\tDevice set: {device}")
    print("✅\tLoading PiscesConfig...")
    config = f"configs/{model_size}.json"
    if not os.path.exists(config):
        print(f"❌\tConfig file {config} not found. Please provide a valid --model_size.")
        sys.exit(1)
    print(f"✅\tLoading config file: {config}")
    # Load the model configuration from the JSON file
    cfg = PiscesConfig.from_json(config)
    print("✅\tPiscesConfig loaded.")

    print("✅\tInitializing PiscesModel with Reasoner...")

    # Always-on Reasoner: Tokenizer setup
    tokenizer = get_tokenizer()
    special_tokens = ["<think>", "</think>"]
    tokenizer.add_tokens(special_tokens)

    model = None
    if force_quant or force_lora:
        from transformers import BitsAndBytesConfig
        from peft import get_peft_model, LoraConfig, TaskType
        quant_config = None
        if force_quant:
            # Configure 4-bit NF4 quantization for the model
            quant_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=torch.bfloat16,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_use_double_quant=True
            )
        model = PiscesModel(cfg, quantization_config=quant_config) if quant_config else PiscesModel(cfg)
        if force_lora:
            # Configure LoRA adapters on attention projections
            lora_config = LoraConfig(
                r=8, lora_alpha=32, target_modules=["q_proj", "v_proj", "o_proj"],
                lora_dropout=0.05, bias="none", task_type=TaskType.CAUSAL_LM
            )
            lora_model = get_peft_model(model, lora_config)
            # Re-expose PiscesModel-specific attributes/methods on the PEFT wrapper
            for attr in ["cfg", "quantization_config", "lora_config", "forward", "prepare_inputs_for_generation"]:
                if hasattr(model, attr):
                    setattr(lora_model, attr, getattr(model, attr))
            model = lora_model
            try:
                model.print_trainable_parameters()
            except Exception:
                pass
    else:
        # Initialize the model without quantization or LoRA
        model = PiscesModel(cfg)

    # Always-on Reasoner: Model and Reasoner setup
    # Resize the token embeddings to accommodate the new special tokens
    model.resize_token_embeddings(len(tokenizer))
    start_id = tokenizer.encoder.get("<think>")
    end_id = tokenizer.encoder.get("</think>")
    if start_id is None or end_id is None:
        raise ValueError("Special reasoning tokens could not be added to the tokenizer.")
    model.reasoner.start_thinking_id = start_id
    model.reasoner.end_thinking_id = end_id
    print(f"✅\tReasoner is integral and configured with token IDs: start={start_id}, end={end_id}")

    if hasattr(model, 'gradient_checkpointing_enable'):
        # Gradient checkpointing is deliberately left disabled to avoid
        # _StopRecomputationError in the expert networks
        print(f"🟧\tGradient checkpointing disabled to avoid expert network computation errors")

    # Move the model to the selected device
    model = model.to(device)
    print("✅\tPiscesModel initialized.")
    if args.distributed:
        # Wrap the model with DistributedDataParallel for distributed training
        model = DDP(model, device_ids=[local_rank])
    elif torch.cuda.is_available() and torch.cuda.device_count() > 1:
        print(f"✅\tUsing {torch.cuda.device_count()} GPUs with DataParallel")
        # Wrap the model with DataParallel for single-node multi-GPU training
        model = torch.nn.DataParallel(model)
    print("✅\tInitializing optimizer and scheduler...")
    # Optimize only trainable parameters (relevant under LoRA/quantization)
    optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
    print("✅\tOptimizer and scheduler ready.")
    resume_ckpt = getattr(args, 'resume_ckpt', None)
    start_epoch = 0
    if resume_ckpt and os.path.exists(resume_ckpt):
        print(f"✅\tResuming from checkpoint: {resume_ckpt}")
        # Load model and optimizer state from the checkpoint
        start_epoch = load_ckpt(resume_ckpt, model, optimizer)
        print(f"✅ Resumed at epoch {start_epoch}")
        # NOTE(review): the lr is reset unconditionally on resume even though
        # an opt-in --reset_lr flag exists; confirm this is intended.
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        print(f"✅ Learning rate auto-reset to {lr}")
    for dataset in dataset_list:
        print(f"\n==============================")
        print(f"✅\tTraining dataset: {dataset}")
        print(f"✅\tBatch size: {batch_size}, Epochs: {epochs}, LR: {lr}")
        cache_path = os.path.join(data_cache_dir, dataset)
        if not os.path.exists(cache_path):
            print(f"❌\tLocal dataset not found: {cache_path}")
            continue
        # Load the training dataset
        train_ds = PiscesDataset(subset=dataset, split="train", config=cfg)
        if len(train_ds) == 0:
            print(f"🟧\tWarning: Dataset '{dataset}' is empty after filtering. Skipping.")
            continue
        print(f"✅\tDataset loaded successfully, size: {len(train_ds)}")
        print("✅\tCreating DataLoader...")
        # Use a DistributedSampler when training distributed; otherwise shuffle
        sampler = DistributedSampler(train_ds) if args.distributed else None
        shuffle = sampler is None
        train_loader = DataLoader(
            train_ds,
            batch_size=batch_size,
            sampler=sampler,
            shuffle=shuffle,
            num_workers=4,
            pin_memory=True,
            drop_last=True,
            collate_fn=collate_fn,
        )
        print("✅\tDataLoader created successfully")
        os.makedirs(save_dir, exist_ok=True)
        print("✅\tStarting training loop...")
        # Set the model to training mode
        model.train()
        # Initialize the gradient scaler only when running on CUDA
        scaler = torch.amp.GradScaler('cuda') if torch.cuda.is_available() else None
        if torch.cuda.is_available():
            # Only touch CUDA memory stats when a GPU is actually present;
            # these calls fail on CPU-only hosts.
            torch.cuda.empty_cache()
            torch.cuda.reset_peak_memory_stats()

        # Enable CUDA debugging for better error messages
        os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
        stop_training = False
        epoch = start_epoch
        try:
            while not stop_training:
                print(f"🟧\tStarting epoch {epoch+1}")
                if args.distributed:
                    # Re-seed the distributed sampler so shuffling differs per epoch
                    sampler.set_epoch(epoch)
                if len(train_loader) == 0:
                    # With drop_last=True, a dataset smaller than batch_size
                    # yields zero batches. The original `continue` here spun
                    # forever; break out to the next dataset instead.
                    print(f"🟧\tSkipping epoch end logic for empty loader.")
                    break
                total_loss = 0
                accum_counter = 0
                optimizer.zero_grad()
                for step, batch in enumerate(train_loader):
                    model_keys = ["input_ids", "labels", "correct"]
                    # Move model inputs to the target device, skipping None values
                    device_batch = {
                        k: v.to(device, non_blocking=True) if isinstance(v, torch.Tensor) else v
                        for k, v in batch.items() if k in model_keys and v is not None
                    }
                    loss = None
                    if scaler is not None:
                        with torch.amp.autocast('cuda'):
                            outputs = model(**device_batch)
                            loss = outputs.get("loss")

                        if loss is not None and loss.requires_grad:
                            if torch.cuda.device_count() > 1:
                                # DataParallel returns one loss per GPU
                                loss = loss.mean()

                            # Scale the (accumulation-normalized) loss and backprop
                            scaler.scale(loss / accum).backward()

                            accum_counter += 1
                            if accum_counter % accum == 0:
                                # Clip gradients and apply the accumulated update
                                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                                scaler.step(optimizer)
                                scaler.update()
                                optimizer.zero_grad()
                                accum_counter = 0
                        else:
                            print(f"🟧\tWarning: Skipping step {step} due to invalid loss (None or no grad).")

                    else:  # Non-scaler (CPU / no-AMP) path
                        outputs = model(**device_batch)
                        loss = outputs.get("loss")

                        if loss is not None and loss.requires_grad:
                            if torch.cuda.device_count() > 1:
                                loss = loss.mean()
                            # Perform backpropagation
                            loss.backward()

                            accum_counter += 1
                            if accum_counter % accum == 0:
                                # Clip gradients and apply the accumulated update
                                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                                optimizer.step()
                                optimizer.zero_grad()
                                accum_counter = 0
                        else:
                            print(f"🟧\tWarning: Skipping step {step} due to invalid loss (None or no grad).")

                    if loss is not None:
                        # NOTE(review): loss.item() is already the full batch loss,
                        # so `* accum` inflates the reported average (and the <1.0
                        # stop threshold below) by the accumulation factor — confirm.
                        total_loss += loss.item() * accum

                        if epoch+1 > min_plateau_epoch and scheduler is not None:
                            # Step the plateau scheduler on the per-batch loss
                            scheduler.step(loss.item())
                        if step % 10 == 0:
                            avg_loss = total_loss / (step + 1)
                            print(f"✅\tEpoch {epoch + 1} | Step {step} | Loss: {avg_loss:.4f} | LR: {optimizer.param_groups[0]['lr']:.2e}")

                avg_loss = total_loss / (step + 1)
                checkpoint_path = f"{save_dir}/pisces_{dataset}_epoch{epoch + 1}.pt"
                if not args.distributed or local_rank == 0:
                    # Save the model and optimizer checkpoint (rank 0 only)
                    save_ckpt(model, optimizer, epoch + 1, checkpoint_path)
                    print(f"✅\tCheckpoint saved: {checkpoint_path}")

                if epoch+1 == min_plateau_epoch:
                    # Enable the ReduceLROnPlateau scheduler from this epoch on
                    scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2, verbose=True, min_lr=1e-6)
                    print(f"✅\tReduceLROnPlateau scheduler enabled after {min_plateau_epoch} epochs.")
                if avg_loss < 1.0:
                    print(f"✅\tLoss < 1.0, stopping training for dataset {dataset}.")
                    stop_training = True
                else:
                    epoch += 1
        except KeyboardInterrupt:
            print("❌\tTraining interrupted by user (Ctrl-C). Saving checkpoint...")
            interrupt_ckpt = f"{save_dir}/latest_interrupt.pt"
            # Save a resumable checkpoint before exiting
            save_ckpt(model, optimizer, epoch + 1, interrupt_ckpt)
            print(f"✅\tCheckpoint saved: {interrupt_ckpt}")
            print(f"✅\tYou can resume training with:")
            print(f"    python manage.py train --model_size {model_size} --resume_ckpt {interrupt_ckpt}")
            sys.exit(0)
        print("✅\tTraining completed!")

    # Final weight export, rank 0 only. The save calls now live *inside* the
    # guard: previously final_weight_path was undefined on non-zero ranks and
    # the unguarded torch.save raised a NameError there.
    if not args.distributed or local_rank == 0:
        final_weight_path = os.path.join(save_dir, f"pisces-l1-{model_size.lower()}-final.pt")
        if hasattr(model, "module"):  # DataParallel/DDP wrapper
            # Save the unwrapped model weights
            torch.save(model.module.state_dict(), final_weight_path)
        else:
            torch.save(model.state_dict(), final_weight_path)
        print(f"✅\tAll datasets finished. Final model weights saved to: {final_weight_path}")

def add_train_args(parser):
    """
    Register training-related command line options on the given parser.

    Args:
        parser (argparse.ArgumentParser): The parser to extend.

    Returns:
        argparse.ArgumentParser: The same parser, for call chaining.
    """
    # Flag specs kept in one table so options are easy to scan and extend.
    option_specs = [
        ('--model_size', dict(default='0.5B', type=str, help='Model size, e.g. 0.5B, 1.5B, 7B, 70B')),
        ('--resume_ckpt', dict(default='', type=str, help='Path to checkpoint to resume training')),
        ('--reset_lr', dict(action='store_true', help='Reset learning rate after resuming checkpoint')),
        ('--local_rank', dict(type=int, default=-1, help='Local rank for distributed training')),
        ('--distributed', dict(action='store_true', help='Enable distributed training')),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser