from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer
from torch.utils.data import Dataset
import torch
from tqdm import tqdm
import os
import re

# Set environment variables for multi-GPU usage.
# TOKENIZERS_PARALLELISM=false silences the HF tokenizers parallelism warning
# (presumably to avoid fork-related issues with DataLoader workers — confirm).
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# NOTE(review): pins training to GPUs 3-6 of this specific host — adjust per machine.
os.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5,6"

class QADataset(Dataset):
    """Conversation dataset for causal-LM fine-tuning with loss masking.

    Reads a UTF-8 text file of conversations separated by "<|eot_id|>",
    drops low-quality examples (e.g. bare "I don't know" answers), and
    tokenizes each conversation so that ONLY the assistant response
    contributes to the loss: user turns, the assistant header, and
    padding are all labelled -100.
    """

    def __init__(self, file_path, tokenizer, block_size=2048):
        """Load, filter, and tokenize all conversations from *file_path*.

        Args:
            file_path: path to the training text file.
            tokenizer: HF tokenizer; pad_token is assumed to be configured
                by the caller (main() falls back to eos_token).
            block_size: fixed sequence length of every produced example.

        Raises:
            FileNotFoundError: if file_path does not exist.
            ValueError: if fewer than 10 usable examples were produced.
        """
        print("Initializing QADataset...")
        self.examples = []
        self.tokenizer = tokenizer
        self.block_size = block_size

        # Fail fast on a missing or degenerate file before tokenizing anything.
        self._validate_training_file(file_path)

        with open(file_path, encoding="utf-8") as f:
            text = f.read()

        # Conversations are delimited by the end-of-turn token; the split
        # strips that token, so _process_conversation re-appends it.
        conversations = text.split("<|eot_id|>")
        conversations = [conv.strip() for conv in conversations if conv.strip()]

        print(f"Found {len(conversations)} raw conversations")

        valid_count = 0
        skipped_count = 0

        for i, conv in enumerate(tqdm(conversations)):
            if not conv.strip():
                continue

            # Drop examples whose assistant answer is uninformative.
            if self._is_poor_quality(conv):
                skipped_count += 1
                continue

            try:
                example = self._process_conversation(conv)
                if example:
                    self.examples.append(example)
                    valid_count += 1

            except Exception as e:
                # Best-effort: one malformed conversation must not abort the run.
                print(f"Error processing conversation {i}: {e}")
                continue

        print(f"Created {valid_count} valid examples, skipped {skipped_count} poor quality")
        print(f"Final dataset size: {len(self.examples)}")

        # Refuse to train on a trivially small dataset.
        if len(self.examples) < 10:
            raise ValueError(f"Insufficient training data: only {len(self.examples)} examples")

    def _validate_training_file(self, file_path):
        """Sanity-check the training file and warn on 'don't know' inflation.

        Raises:
            FileNotFoundError: if the file is missing.
            ValueError: if no assistant responses are present at all.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"Training file {file_path} not found")

        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()

        # Count total assistant turns and "don't know"-style responses.
        assistant_count = content.count('<|start_header_id|>assistant<|end_header_id|>')
        dont_know_count = len(re.findall(r'i don\'?t know', content.lower()))

        if assistant_count == 0:
            raise ValueError("No valid assistant responses found in training data")

        dont_know_ratio = dont_know_count / assistant_count

        print(f"Training data quality check:")
        print(f"  Total assistant responses: {assistant_count}")
        print(f"  'Don't know' responses: {dont_know_count}")
        print(f"  'Don't know' ratio: {dont_know_ratio:.1%}")

        # Warn (but do not abort) when refusals dominate the data.
        if dont_know_ratio > 0.4:
            print("WARNING: High ratio of 'don't know' responses detected!")
            print("This will cause poor model performance.")

    def _is_poor_quality(self, conversation):
        """Return True if the assistant response is too weak to train on.

        Flags responses that are bare refusals ("i don't know",
        "invalid question", "i cannot"), repeated refusals, or shorter
        than 5 words. Conversations without an assistant marker pass.
        """
        assistant_marker = '<|start_header_id|>assistant<|end_header_id|>'
        if assistant_marker in conversation:
            # Everything after the first assistant marker is the response.
            parts = conversation.split(assistant_marker)
            if len(parts) > 1:
                response = parts[1].strip()

                # Patterns for known low-value answers.
                poor_patterns = [
                    r'^\s*i don\'?t know\s*$',              # Only "don't know"
                    r'^\s*invalid question\s*$',            # Only "invalid question"
                    r'^\s*i cannot\s*$',                     # Only "i cannot"
                    r'i don\'?t know.*i don\'?t know',       # Repeated "don't know"
                    r'invalid.*invalid',                     # Repeated "invalid"
                ]

                for pattern in poor_patterns:
                    if re.search(pattern, response.lower()):
                        return True

                # Skip very short responses (fewer than 5 words).
                word_count = len(response.split())
                if word_count < 5:
                    return True

        return False

    def _process_conversation(self, conversation):
        """Tokenize one conversation and build loss labels.

        Returns a dict with "input_ids" and "labels" long tensors of
        length self.block_size, or None if no assistant turn is found in
        the text or the token stream.

        Only the assistant-response tokens receive real labels; user
        turns, the assistant header, and padding are masked with -100.
        (The previous implementation padded inside tokenizer.encode and
        then copied the padded tail into the labels, which trained the
        model to emit PAD tokens after every answer — fixed by padding
        manually after the labels are built.)
        """
        assistant_marker = "<|start_header_id|>assistant<|end_header_id|>"

        if assistant_marker not in conversation:
            return None

        # Re-attach the end-of-turn token that was stripped during splitting.
        full_text = conversation + "<|eot_id|>"

        # Tokenize WITHOUT padding so the true length is known.
        token_ids = self.tokenizer.encode(
            full_text,
            add_special_tokens=True,
            max_length=self.block_size,
            truncation=True
        )

        # Locate the assistant header inside the token stream.
        assistant_tokens = self.tokenizer.encode(assistant_marker, add_special_tokens=False)
        assistant_pos = self._find_sublist(token_ids, assistant_tokens)

        if assistant_pos == -1:
            return None

        real_len = len(token_ids)
        pad_id = self.tokenizer.pad_token_id
        if pad_id is None:
            # NOTE(review): main() sets pad_token = eos_token, so this
            # fallback should be unreachable; 0 is a defensive default.
            pad_id = 0

        # Right-pad inputs to the fixed block size.
        input_ids = token_ids + [pad_id] * (self.block_size - real_len)

        # -100 everywhere except the assistant response itself.
        labels = [-100] * self.block_size
        response_start = assistant_pos + len(assistant_tokens)
        if response_start < real_len:
            labels[response_start:real_len] = token_ids[response_start:real_len]

        return {
            "input_ids": torch.tensor(input_ids, dtype=torch.long),
            "labels": torch.tensor(labels, dtype=torch.long)
        }

    def _find_sublist(self, main_list, sublist):
        """Return the first index of *sublist* inside *main_list*, or -1.

        An empty sublist yields -1 (treated as "not found").
        """
        if not sublist:
            return -1
        sub_len = len(sublist)
        for i in range(len(main_list) - sub_len + 1):
            if main_list[i:i+sub_len] == sublist:
                return i
        return -1

    def __len__(self):
        """Number of usable training examples."""
        return len(self.examples)

    def __getitem__(self, idx):
        """Return the pre-tokenized example dict at *idx*."""
        return self.examples[idx]

# Custom data collator for proper batching
class DataCollator:
    """Batch pre-tokenized, fixed-length examples for the Trainer.

    Every example is already padded to the same length by the dataset, so
    batching is a plain stack; the attention mask is recovered from the
    pad-token positions in the stacked input ids.
    """

    def __init__(self, tokenizer):
        # Only pad_token_id is consulted, to rebuild the attention mask.
        self.tokenizer = tokenizer

    def __call__(self, features):
        """Stack a list of feature dicts into a single batch dict."""
        batch_inputs = torch.stack([example["input_ids"] for example in features])
        batch_labels = torch.stack([example["labels"] for example in features])
        # Real tokens attend (1); padding positions do not (0).
        pad_id = self.tokenizer.pad_token_id
        batch_mask = batch_inputs.ne(pad_id).long()

        return {
            "input_ids": batch_inputs,
            "labels": batch_labels,
            "attention_mask": batch_mask
        }

def main():
    """End-to-end LoRA fine-tuning driver for a local Qwen3-8B checkpoint.

    Loads tokenizer and model from local relative paths (no hub access),
    builds a QADataset from a conversation text file, attaches a LoRA
    adapter via PEFT, and runs a step-based HF Trainer with periodic
    evaluation and checkpointing. On training failure it attempts to save
    an emergency adapter checkpoint instead of losing all progress.
    """
    # Configuration — local relative paths; adjust per deployment.
    model_name = "../Qwen3-8B"
    output_dir = "../Qwen3-523-peft-fixed"
    train_file_path = "train_523.txt"
    
    print(f"Model path: {model_name}")
    print(f"Training file: {train_file_path}")
    print(f"Output directory: {output_dir}")
    print(f"CUDA available: {torch.cuda.is_available()}")
    
    # Report visible GPUs (already restricted by CUDA_VISIBLE_DEVICES at import time).
    if torch.cuda.is_available():
        print(f"GPU count: {torch.cuda.device_count()}")
        for i in range(torch.cuda.device_count()):
            print(f"  GPU {i}: {torch.cuda.get_device_name(i)}")
    
    # Load tokenizer strictly from the local checkpoint (no network).
    print("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        trust_remote_code=True,
        local_files_only=True,
        use_fast=False
    )
    
    # Reuse EOS as the pad token if the checkpoint defines none — QADataset
    # and DataCollator both rely on pad_token_id being set.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    
    # Add chat-format special tokens if not already present.
    # NOTE(review): "<|user|>" / "<|assistant|>" never appear in the
    # QADataset training format (which uses the header_id markers) —
    # confirm they are actually needed before growing the vocabulary.
    special_tokens = [
        "<|start_header_id|>", "<|end_header_id|>", 
        "<|eot_id|>", "<|user|>", "<|assistant|>"
    ]
    
    new_tokens = []
    for token in special_tokens:
        if token not in tokenizer.get_vocab():
            new_tokens.append(token)
    
    if new_tokens:
        tokenizer.add_special_tokens({"additional_special_tokens": new_tokens})
        print(f"Added {len(new_tokens)} new special tokens")
    
    # Build the full dataset (filters poor-quality conversations internally).
    print("Creating training dataset...")
    dataset = QADataset(train_file_path, tokenizer, block_size=2048)
    
    # Split into train/validation (90/10), with a floor of 5 validation samples.
    dataset_size = len(dataset)
    val_size = max(5, int(dataset_size * 0.1))  # At least 5 samples for validation
    train_size = dataset_size - val_size
    
    # NOTE(review): random_split without a generator seed — the split is not
    # reproducible across runs; pass torch.Generator().manual_seed(...) to fix.
    train_dataset, val_dataset = torch.utils.data.random_split(
        dataset, [train_size, val_size]
    )
    
    print(f"Train samples: {len(train_dataset)}")
    print(f"Validation samples: {len(val_dataset)}")
    
    # Load the base model sharded across visible GPUs (device_map="auto"),
    # in bf16 when the hardware supports it, else fp16.
    print("Loading model...")
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        trust_remote_code=True,
        use_cache=False,
        device_map="auto",
        local_files_only=True,
        torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16,
        low_cpu_mem_usage=True
    )
    
    # Resize embeddings if new tokens were added — must happen before PEFT wrapping.
    if new_tokens:
        model.resize_token_embeddings(len(tokenizer))
        print(f"Resized token embeddings to {len(tokenizer)}")
    
    # Setup LoRA (import deferred so peft is only required when training).
    from peft import LoraConfig, get_peft_model, TaskType
    
    peft_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        inference_mode=False,
        r=32,                    # Reduced rank to prevent overfitting
        lora_alpha=64,           # Alpha = 2 * rank
        lora_dropout=0.1,        # Higher dropout for regularization
        bias="none",
        # Target all attention and MLP projection matrices.
        target_modules=[
            "q_proj", "k_proj", "v_proj", "o_proj",
            "gate_proj", "up_proj", "down_proj"
        ]
    )
    
    model = get_peft_model(model, peft_config)
    model.print_trainable_parameters()
    
    # Training arguments
    training_args = TrainingArguments(
        output_dir=output_dir,
        
        # === Batch Size Configuration ===
        per_device_train_batch_size=1,          # Keep same as original
        per_device_eval_batch_size=1,
        gradient_accumulation_steps=2,          # Reduced from 8 to match original effective batch
        # Effective batch = 4 GPUs × 1 × 2 = 8 (same as original)
        
        # === Training Steps (Based on your specific adapter) ===
        # For web context model (like checkpoint-480):
        # max_steps=480,                          # Match original checkpoint
        
        # For API generation model (like checkpoint-500):
        max_steps=500,
        
        # For API extraction model (like checkpoint-580):
        # max_steps=580,
        
        # === Learning Configuration ===
        learning_rate=5e-5,                     # Conservative but effective
        lr_scheduler_type="cosine",
        warmup_ratio=0.1,                       # 10% warmup
        optim="adamw_torch",
        
        # === Logging and Evaluation ===
        logging_steps=20,                       # More frequent logging for shorter training
        eval_steps=80,                          # Evaluate 6 times during training (480/80=6)
        logging_first_step=True,
        
        # === Saving Strategy ===
        save_steps=160,                         # Save 3 checkpoints during training
        save_strategy="steps",
        save_total_limit=4,                     # Keep 4 checkpoints + final
        eval_strategy="steps",
        
        # === Model Selection ===
        load_best_model_at_end=True,
        metric_for_best_model="eval_loss",
        greater_is_better=False,
        
        # === Hardware Optimization ===
        bf16=torch.cuda.is_bf16_supported(),    # Use bf16 if available
        fp16=not torch.cuda.is_bf16_supported(),
        max_grad_norm=0.3,                      # Gradient clipping
        gradient_checkpointing=True,
        gradient_checkpointing_kwargs={"use_reentrant": False},
        
        # === Memory Optimization ===
        dataloader_pin_memory=False,            # Reduce memory usage
        dataloader_num_workers=1,               # Reduce from 2 to 1 for stability
        remove_unused_columns=False,
        
        # === Reporting ===
        report_to="none",                       # No external reporting
    )

    
    # Create trainer with the custom fixed-length collator.
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        data_collator=DataCollator(tokenizer),
    )
    
    # Keep normalization layers in fp32 for numerical stability under bf16/fp16.
    # NOTE(review): nn.Module.to() converts parameters in place, so this works,
    # but the `module =` rebinding is redundant; also `"ln_" in name` is
    # case-sensitive while the first check lower-cases — confirm intent.
    for name, module in trainer.model.named_modules():
        if "norm" in name.lower() or "ln_" in name:
            module = module.to(torch.float32)
    
    print("Starting training...")
    model.print_trainable_parameters()
    
    # Clear GPU cache before training to maximize free memory.
    torch.cuda.empty_cache()
    
    try:
        # Start training
        trainer.train()
        print("Training completed successfully!")
        
        # Save final adapter + tokenizer.
        # NOTE(review): directory name hard-codes "checkpoint-500" to mirror
        # max_steps=500 above — keep the two in sync if steps change.
        final_dir = f"{output_dir}/checkpoint-500"
        trainer.model.save_pretrained(final_dir)
        tokenizer.save_pretrained(final_dir)
        
        print(f"Final model saved to: {final_dir}")
        
        # List all checkpoints written under output_dir, sorted by step number.
        checkpoints = [d for d in os.listdir(output_dir) if d.startswith('checkpoint-')]
        if checkpoints:
            checkpoints.sort(key=lambda x: int(x.split('-')[1]) if x.split('-')[1].isdigit() else 0)
            print("Available checkpoints:")
            for cp in checkpoints:
                print(f"  {output_dir}/{cp}")
        
    except Exception as e:
        print(f"Training failed: {e}")
        import traceback
        traceback.print_exc()
        
        # Save emergency checkpoint so the partially-trained adapter survives.
        try:
            emergency_dir = f"{output_dir}/checkpoint-emergency"
            trainer.model.save_pretrained(emergency_dir)
            tokenizer.save_pretrained(emergency_dir)
            print(f"Emergency checkpoint saved to: {emergency_dir}")
        except:
            # NOTE(review): bare except is deliberate best-effort here, but it
            # also swallows KeyboardInterrupt — prefer `except Exception`.
            pass

if __name__ == "__main__":
    main()
