import argparse
import json
import os
import torch
from typing import List, Dict
from attack import BertAttack
from dataset import create_imdb_dataloader

# Pin the process to a single GPU via CUDA device masking.
# NOTE(review): the device index '6' is hard-coded — presumably specific to the
# original author's multi-GPU machine; consider making it configurable. Must be
# set before torch initializes CUDA to take effect.
os.environ['CUDA_VISIBLE_DEVICES'] = '6'

class Config:
    """Runtime configuration for the BERT attack script.

    Every attribute gets a default in ``__init__`` so a directly constructed
    ``Config()`` exposes the same attribute set as one built by
    :meth:`from_args` (previously ``num_epoch``, ``split`` and ``max_samples``
    existed only after CLI parsing, which could raise ``AttributeError``).
    """

    def __init__(self):
        self.model_name = 'bert-base-uncased'  # HuggingFace model identifier
        self.batch_size = 32                   # samples per dataloader batch
        self.max_length = 128                  # maximum token sequence length
        self.num_epoch = 50                    # kept for compatibility; unused by main()
        self.data_path = 'data.json'           # data file path (CLI default differs: 'databases/imdb')
        self.output_path = 'results.json'      # where adversarial texts are dumped
        self.split = 'test'                    # dataset split to attack
        self.max_samples = 100                 # cap on number of samples processed

    @classmethod
    def from_args(cls):
        """Build a :class:`Config` from command-line arguments.

        Returns:
            Config: configuration populated from argparse values (each falls
            back to its declared argparse default when the flag is omitted).
        """
        parser = argparse.ArgumentParser(description='BERT Attack on IMDB Dataset')
        parser.add_argument('--model', type=str, default='bert-base-uncased',
                          help='BERT model name (default: bert-base-uncased)')
        parser.add_argument('--num_epoch', type=int, default=50,
                          help='Epoch number for training (default: 50)')
        parser.add_argument('--batch_size', type=int, default=32,
                          help='Batch size for processing (default: 32)')
        parser.add_argument('--max_length', type=int, default=128,
                          help='Maximum sequence length (default: 128)')
        # Help text fixed: it previously claimed "default: data.json" while the
        # actual argparse default is 'databases/imdb'.
        parser.add_argument('--data_path', type=str, default='databases/imdb',
                          help='Path to data file or IMDB dataset directory (default: databases/imdb)')
        parser.add_argument('--output_path', type=str, default='results.json',
                          help='Output file path (default: results.json)')
        parser.add_argument('--split', type=str, default='test', choices=['train', 'test'],
                          help='Dataset split to use (train/test, default: test)')
        parser.add_argument('--max_samples', type=int, default=100,
                          help='Maximum number of samples to process (default: 100)')
        args = parser.parse_args()

        config = cls()
        config.model_name = args.model
        config.batch_size = args.batch_size
        config.num_epoch = args.num_epoch
        config.max_length = args.max_length
        config.data_path = args.data_path
        config.output_path = args.output_path
        config.split = args.split
        config.max_samples = args.max_samples
        return config


class ASREvaluator:
    """Track and report Attack Success Rate (ASR) over a series of attacks.

    Attributes:
        total_attacks: Number of attack results evaluated so far.
        successful_attacks: How many of those reached their target label.
        results: Per-attack detail dicts (text, labels, confidence, success).
    """

    def __init__(self):
        self.total_attacks = 0
        self.successful_attacks = 0
        self.results = []

    @staticmethod
    def _predict(text, attacker):
        """Classify `text` with the attacker's target model.

        Args:
            text: Input string to classify.
            attacker: BertAttack instance exposing `tgt_tokenizer`,
                `tgt_model` and `device`.

        Returns:
            tuple: (predicted_label, confidence) where confidence is the
            softmax probability of the predicted label.
        """
        with torch.no_grad():
            inputs = attacker.tgt_tokenizer(text, return_tensors="pt",
                                            truncation=True, max_length=512).to(attacker.device)
            output = attacker.tgt_model(**inputs)
            pred_prob = torch.softmax(output.logits, dim=-1)
            pred_label = torch.argmax(pred_prob, dim=-1).item()
            confidence = pred_prob[0][pred_label].item()
        return pred_label, confidence

    def evaluate(self, result, target_label, attacker):
        """
        Evaluate if an attack was successful.

        Args:
            result: Either a tuple (adversarial_text, success_count) from attack,
                    or just adversarial_text string if no attack was needed.
            target_label: Target label the attack was trying to achieve.
            attacker: BertAttack instance for model prediction.

        Returns:
            bool: True if attack was successful, False otherwise.
        """
        # Handle both cases: tuple (adversarial_text, success_count) or just adversarial_text.
        if isinstance(result, tuple) and len(result) == 2:
            adversarial_text, success_count = result
        else:
            # Bare string means no attack was needed; record 0 substitutions.
            adversarial_text = result
            success_count = 0

        # Re-classify the adversarial text to decide success independently of
        # whatever the attack routine itself reported.
        pred_label, confidence = self._predict(adversarial_text, attacker)

        is_successful = (pred_label == target_label)

        # Update running statistics.
        self.total_attacks += 1
        if is_successful:
            self.successful_attacks += 1

        # Store result details for the final summary.
        result_info = {
            'adversarial_text': adversarial_text,
            'target_label': target_label,
            'predicted_label': pred_label,
            'is_successful': is_successful,
            'success_count': success_count,
            'confidence': confidence
        }
        self.results.append(result_info)

        return is_successful

    def get_asr(self):
        """Return the Attack Success Rate as a percentage (0.0 when empty)."""
        if self.total_attacks == 0:
            return 0.0
        return (self.successful_attacks / self.total_attacks) * 100

    def evaluate_asr(self, result, target_label, attacker):
        """
        Evaluate a single attack result, update statistics, and print status.

        Args:
            result: Tuple (adversarial_text, success_count) from attack.
            target_label: Target label the attack was trying to achieve.
            attacker: BertAttack instance for model prediction.
        """
        is_successful = self.evaluate(result, target_label, attacker)

        if is_successful:
            print(' ✓ SUCCESS')
        else:
            print(' ✗ FAILED')

    def print_summary(self):
        """Print ASR summary statistics and return the ASR percentage."""
        asr = self.get_asr()
        print(f"\n=== ASR Evaluation Summary ===")
        print(f"Total attacks: {self.total_attacks}")
        print(f"Successful attacks: {self.successful_attacks}")
        print(f"Attack Success Rate (ASR): {asr:.2f}%")

        # Print details of successful attacks only.
        print(f"\nSuccessful attacks details:")
        for i, result in enumerate(self.results):
            if result['is_successful']:
                print(f"  Attack {i+1}: Target={result['target_label']}, "
                      f"Predicted={result['predicted_label']}, "
                      f"Confidence={result['confidence']:.3f}")

        return asr


def main():
    """Run targeted adversarial attacks over the IMDB dataset and report ASR.

    Loads the configured dataset split, attacks each sample with BertAttack,
    writes all adversarial texts to ``config.output_path`` as JSON, and prints
    a final Attack Success Rate summary.
    """
    config = Config.from_args()
    dataloader = create_imdb_dataloader(
        dataset_path=config.data_path,
        split=config.split,
        max_samples=config.max_samples,
        batch_size=config.batch_size,
        shuffle=False,  # deterministic order so saved results align with the dataset
    )
    attacker = BertAttack(config.model_name)
    results = []

    # Tracks per-attack outcomes and the overall success rate.
    asr_evaluator = ASREvaluator()

    for batch in dataloader:
        # Iterate batch items directly instead of building parallel
        # texts/target_labels lists and re-indexing them.
        for item in batch:
            text = item['text']
            target_label = item['target_label']
            attack_result = attacker.attack(text, target_label)

            # Expected return shape: (adversarial_text, success_count).
            if isinstance(attack_result, tuple) and len(attack_result) == 2:
                adversarial_text, success_count = attack_result

                if success_count > 0:
                    print('success')
                else:
                    # No substitutions made — check whether the original text
                    # already carried the target label (no attack needed).
                    with torch.no_grad():
                        inputs = attacker.tgt_tokenizer(text, return_tensors="pt", truncation=True, max_length=512).to(attacker.device)
                        output = attacker.tgt_model(**inputs)
                        pred_label = torch.argmax(output.logits, dim=-1).item()

                    if pred_label == target_label:
                        print('no_attack')
                    else:
                        print('failed')
            else:
                # Fallback: unexpected return shape — treat it as the adversarial text.
                adversarial_text = attack_result
                print('unknown_result')

            results.append(adversarial_text)

            # Evaluate ASR for this attack (re-classifies the adversarial text).
            asr_evaluator.evaluate_asr(attack_result, target_label, attacker)

    with open(config.output_path, 'w') as f:
        json.dump(results, f, indent=2)
    print(f"Results saved to {config.output_path}")

    # Print final ASR summary.
    asr_evaluator.print_summary()


if __name__ == "__main__":
    main()
