import copy
import torch
from transformers import BertTokenizer, BertForSequenceClassification, BertForMaskedLM
from tqdm import tqdm
import numpy as np
import torch.nn as nn

# Common English stop words plus punctuation that make poor substitution
# candidates; any MLM suggestion found in this collection is discarded.
filter_words = ['a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against', 'ain', 'all', 'almost',
                'alone', 'along', 'already', 'also', 'although', 'am', 'among', 'amongst', 'an', 'and', 'another',
                'any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are', 'aren', "aren't", 'around', 'as',
                'at', 'back', 'been', 'before', 'beforehand', 'behind', 'being', 'below', 'beside', 'besides',
                'between', 'beyond', 'both', 'but', 'by', 'can', 'cannot', 'could', 'couldn', "couldn't", 'd', 'didn',
                "didn't", 'doesn', "doesn't", 'don', "don't", 'down', 'due', 'during', 'either', 'else', 'elsewhere',
                'empty', 'enough', 'even', 'ever', 'everyone', 'everything', 'everywhere', 'except', 'first', 'for',
                'former', 'formerly', 'from', 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'he', 'hence',
                'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', 'himself', 'his',
                'how', 'however', 'hundred', 'i', 'if', 'in', 'indeed', 'into', 'is', 'isn', "isn't", 'it', "it's",
                'its', 'itself', 'just', 'latter', 'latterly', 'least', 'll', 'may', 'me', 'meanwhile', 'mightn',
                "mightn't", 'mine', 'more', 'moreover', 'most', 'mostly', 'must', 'mustn', "mustn't", 'my', 'myself',
                'namely', 'needn', "needn't", 'neither', 'never', 'nevertheless', 'next', 'no', 'nobody', 'none',
                'noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'o', 'of', 'off', 'on', 'once', 'one', 'only',
                'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours', 'ourselves', 'out', 'over', 'per',
                'please', 's', 'same', 'shan', "shan't", 'she', "she's", "should've", 'shouldn', "shouldn't", 'somehow',
                'something', 'sometime', 'somewhere', 'such', 't', 'than', 'that', "that'll", 'the', 'their', 'theirs',
                'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', 'therefore', 'therein',
                'thereupon', 'these', 'they', 'this', 'those', 'through', 'throughout', 'thru', 'thus', 'to', 'too',
                'toward', 'towards', 'under', 'unless', 'until', 'up', 'upon', 'used', 've', 'was', 'wasn', "wasn't",
                'we', 'were', 'weren', "weren't", 'what', 'whatever', 'when', 'whence', 'whenever', 'where',
                'whereafter', 'whereas', 'whereby', 'wherein', 'whereupon', 'wherever', 'whether', 'which', 'while',
                'whither', 'who', 'whoever', 'whole', 'whom', 'whose', 'why', 'with', 'within', 'without', 'won',
                "won't", 'would', 'wouldn', "wouldn't", 'y', 'yet', 'you', "you'd", "you'll", "you're", "you've",
                'your', 'yours', 'yourself', 'yourselves', '.', '-', 'a the', '/', '?', 'some', '"', ',', 'b', '&', '!',
                '@', '%', '^', '*', '(', ')', "-", '-', '+', '=', '<', '>', '|', ':', ";", '～', '·']
filter_words = set(filter_words)  # O(1) membership tests; also collapses the duplicate '-' entries above

class BertAttack:
    """BERT-based adversarial text attack: perturbs input text with MLM-proposed
    word substitutions until a fine-tuned BERT classifier predicts a target label."""

    def __init__(self, model_name='bert-base-uncased', class_model="JiaqiLee/imdb-finetuned-bert-base-uncased"):
        """
        Initialize BERT attack system with two models:
        - MLM model: BERT Masked Language Model for generating substitutions
        - Target model: Fine-tuned BERT classifier to attack

        Args:
            model_name: Base BERT model name for the masked language model
            class_model: Hub name or path of the fine-tuned target classifier
        """
        # Prefer GPU when available, otherwise fall back to CPU
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
            print("Using CUDA (GPU) for acceleration")
        else:
            self.device = torch.device('cpu')
            print("CUDA not available, using CPU instead")

        # MLM model that proposes word substitutions for masked positions
        self.tokenizer = BertTokenizer.from_pretrained(model_name)
        self.model = BertForMaskedLM.from_pretrained(model_name).to(self.device)
        self.model.eval()

        # Target classifier under attack (binary labels)
        self.tgt_tokenizer = BertTokenizer.from_pretrained(class_model)
        self.tgt_model = BertForSequenceClassification.from_pretrained(class_model, num_labels=2).to(self.device)
        self.tgt_model.eval()

        # Attack configuration parameters
        self.topk = 10  # Number of top MLM candidates to consider per masked position
        self.num_perturbation = 10  # Maximum number of word substitutions to apply
        self.threshold_pred_score = 0.3  # Candidate score threshold (reserved; not used internally — confirm callers)
        self.max_length = 512  # Maximum sequence length for BERT processing
        self.max_attempts_per_word = 3  # Maximum substitution attempts per important word

    def get_important_scores(self, text, target_label):
        """
        Compute importance scores for each word using ablation analysis.

        Each word is masked in turn and the change in the target-label
        probability is measured; a larger change means the word matters
        more for the target prediction.

        Args:
            text: Input text to analyze
            target_label: Target label (0 or 1) to compute importance for

        Returns:
            Dictionary mapping word indices to importance scores (higher = more important)
        """
        # Inference only: no_grad avoids building autograd graphs for every
        # forward pass (one per word), which the original version leaked.
        with torch.no_grad():
            original_input = self.tgt_tokenizer(text, return_tensors="pt", truncation=True,
                                                max_length=self.max_length).to(self.device)
            original_prob = torch.softmax(self.tgt_model(**original_input).logits, dim=-1)

            words = text.split(' ')
            importance_scores = []

            for i, word in enumerate(words):
                # Whitespace-only tokens cannot be meaningfully masked
                if len(word.strip()) == 0:
                    importance_scores.append(0.0)
                    continue

                masked_words = words.copy()
                masked_words[i] = '[MASK]'  # Ablate one word at a time
                masked_text = ' '.join(masked_words)

                masked_input = self.tgt_tokenizer(masked_text, return_tensors="pt", truncation=True,
                                                  max_length=self.max_length).to(self.device)
                masked_prob = torch.softmax(self.tgt_model(**masked_input).logits, dim=-1)

                # Importance = absolute shift of the target-label probability
                importance = abs(original_prob[0][target_label] - masked_prob[0][target_label])
                importance_scores.append(importance.item())

        return {i: score for i, score in enumerate(importance_scores)}

    def attack(self, text, target_label):
        """
        Perturb `text` so the target model predicts `target_label`.

        Strategy:
        1. Skip the attack if the text already receives the target label.
        2. Rank words by importance (ablation analysis).
        3. For each important word, mask it and collect MLM substitution candidates.
        4. Apply the first candidate that yields the target label with confidence > 0.6.

        Args:
            text: Input text to attack
            target_label: Desired classification label (0 or 1)

        Returns:
            Tuple (adversarial_text, success_count) where success_count is the
            number of words actually substituted (0 if no attack was needed).
        """
        # Short-circuit: no attack needed if the prediction already matches
        with torch.no_grad():
            inputs = self.tgt_tokenizer(text, return_tensors="pt", truncation=True,
                                        max_length=self.max_length).to(self.device)
            tgt_labels = torch.argmax(self.tgt_model(**inputs).logits, dim=-1)
        if tgt_labels[0] == target_label:
            return text, 0

        # Rank word positions by their influence on the target-label probability
        important_scores = self.get_important_scores(text, target_label)
        words = text.split(' ')
        sorted_indices = sorted(important_scores.items(), key=lambda x: x[1], reverse=True)

        final_words = words.copy()
        success_count = 0
        attempts_per_word = {}

        for word_idx, _importance in sorted_indices:
            if success_count >= self.num_perturbation:
                break
            if word_idx >= len(words):
                continue

            tgt_word = words[word_idx]

            # Skip stop words, empty tokens, and very short / non-alphabetic words
            if (tgt_word in filter_words or len(tgt_word.strip()) == 0 or
                    len(tgt_word) <= 2 or not tgt_word.isalpha()):
                continue

            # Bound the number of substitution attempts per word
            attempts_per_word.setdefault(word_idx, 0)
            if attempts_per_word[word_idx] >= self.max_attempts_per_word:
                continue

            # Mask the word and query the MLM for replacement candidates
            masked_words = words.copy()
            masked_words[word_idx] = '[MASK]'
            masked_text = ' '.join(masked_words)

            with torch.no_grad():
                inputs = self.tokenizer(masked_text, return_tensors="pt", truncation=True,
                                        max_length=self.max_length).to(self.device)
                predictions = self.model(**inputs).logits

                mask_token_index = torch.where(inputs["input_ids"] == self.tokenizer.mask_token_id)[1]
                if len(mask_token_index) == 0:
                    continue  # Mask fell outside the truncated sequence

                mask_logits = predictions[0, mask_token_index, :]
                top_k_tokens = torch.topk(mask_logits, self.topk, dim=-1).indices[0]

            # Filter candidates: drop subword pieces, special tokens,
            # non-alphabetic tokens, the original word, and stop words
            candidate_words = []
            for token_id in top_k_tokens:
                token = self.tokenizer.decode(token_id).strip()
                if (token.startswith('##') or
                        token in [self.tokenizer.unk_token, self.tokenizer.pad_token,
                                  self.tokenizer.cls_token, self.tokenizer.sep_token, '[MASK]'] or
                        not token.isalpha() or len(token) <= 1):
                    continue
                if token.lower() == tgt_word.lower():
                    continue
                if token.lower() in filter_words:
                    continue
                candidate_words.append(token)

            # De-duplicate while preserving MLM ranking order
            seen = set()
            unique_candidates = []
            for cand in candidate_words:
                if cand not in seen:
                    seen.add(cand)
                    unique_candidates.append(cand)

            # Try each candidate until one achieves the target label confidently
            for candidate in unique_candidates:
                if attempts_per_word[word_idx] >= self.max_attempts_per_word:
                    break

                temp_words = final_words.copy()
                temp_words[word_idx] = candidate
                candidate_text = ' '.join(temp_words)

                with torch.no_grad():
                    candidate_input = self.tgt_tokenizer(candidate_text, return_tensors="pt",
                                                         truncation=True, max_length=self.max_length).to(self.device)
                    candidate_probs = torch.softmax(self.tgt_model(**candidate_input).logits, dim=-1)
                    candidate_label = torch.argmax(candidate_probs, dim=-1).item()
                    candidate_confidence = candidate_probs[0][candidate_label].item()

                attempts_per_word[word_idx] += 1

                # Accept only confident predictions of the target label
                if candidate_label == target_label and candidate_confidence > 0.6:
                    final_words[word_idx] = candidate
                    success_count += 1
                    break  # Move on to the next important word

            if success_count >= self.num_perturbation:
                break

        # Re-assemble text and strip any special-token artifacts
        adversarial_text = ' '.join(final_words)
        adversarial_text = adversarial_text.replace('[PAD]', '').replace('[unused0]', '')
        adversarial_text = ' '.join(adversarial_text.split())  # Normalize whitespace

        return adversarial_text, success_count

    def _tokenize(self, text):
        """
        Tokenize text while tracking which subword tokens belong to which word.

        Args:
            text: Input text (words separated by single spaces)

        Returns:
            words: Original words split by space
            sub_words: Flat list of BERT subword tokens
            keys: [start, end) index pairs into sub_words, one per word
        """
        words = text.split(' ')

        sub_words = []
        keys = []
        index = 0
        for word in words:
            sub = self.tokenizer.tokenize(word)  # Split one word into BERT subwords
            sub_words += sub
            keys.append([index, index + len(sub)])  # Word boundaries within sub_words
            index += len(sub)

        return words, sub_words, keys
    
def get_substitues(substitutes, tokenizer, mlm_model, use_bpe, substitutes_score=None, threshold=3.0):
    """
    Generate word substitution candidates from MLM predictions.

    Converts candidate token IDs to word strings. Single-token words are
    converted directly, optionally filtered by score threshold; multi-token
    words are delegated to BPE-based generation when `use_bpe` is enabled.

    Args:
        substitutes: Tensor of token IDs (shape: [sub_len, k]) from MLM predictions
        tokenizer: Tokenizer providing `_convert_id_to_token`
        mlm_model: BERT MLM model, used only for BPE substitution (use_bpe == 1)
        use_bpe: Flag to use BPE-based substitution (1 = use BPE, 0 = simple conversion)
        substitutes_score: Optional tensor of prediction scores (shape: [sub_len, k]).
            When None, no score filtering is applied.
        threshold: Minimum score threshold for candidate acceptance (0 disables filtering)

    Returns:
        List of valid substitution words (possibly empty)
    """
    words = []
    sub_len, k = substitutes.size()  # sub_len subword positions, k candidates each

    if sub_len == 0:
        return words  # No substitutions available

    if sub_len == 1:
        # Single-token case: direct ID-to-token conversion.
        # Fix: the original crashed with TypeError when substitutes_score was
        # left at its documented default of None; accept all candidates then.
        if substitutes_score is None:
            return [tokenizer._convert_id_to_token(int(token_id)) for token_id in substitutes[0]]
        for (token_id, score) in zip(substitutes[0], substitutes_score[0]):
            if threshold != 0 and score < threshold:
                break  # Candidates are score-ordered; stop at first below threshold
            words.append(tokenizer._convert_id_to_token(int(token_id)))
        return words

    # Multi-token case: requires BPE-based combination ranking
    if use_bpe == 1:
        words = get_bpe_substitues(substitutes, tokenizer, mlm_model)
    return words


def get_bpe_substitues(substitutes, tokenizer, mlm_model):
    """
    Generate BPE substitution candidates for words split into multiple subword tokens.

    Builds candidate token sequences as the Cartesian product of the top
    candidates at each subword position, then ranks the sequences by average
    token perplexity under the MLM (lower = more fluent).

    Args:
        substitutes: Tensor of token IDs (shape: [L, k]) from MLM predictions
        tokenizer: Tokenizer providing `_convert_id_to_token` and `convert_tokens_to_string`
        mlm_model: BERT MLM model used to score candidate sequences

    Returns:
        List of substitution strings sorted by ascending perplexity (best first)
    """
    # Cap positions (12) and per-position candidates (4) to keep the product tractable
    substitutes = substitutes[0:12, 0:4]

    # Cartesian product of the candidates at each position
    all_substitutes = []
    for i in range(substitutes.size(0)):
        if not all_substitutes:
            # First position seeds the sequences
            all_substitutes = [[int(c)] for c in substitutes[i]]
        else:
            # Extend every existing sequence with each candidate at position i
            all_substitutes = [seq + [int(j)] for seq in all_substitutes for j in substitutes[i]]

    # Fix: the original crashed on an empty candidate grid (tensor unpack below)
    if not all_substitutes:
        return []

    # Score each candidate sequence by average token perplexity under the MLM
    c_loss = nn.CrossEntropyLoss(reduction='none')
    all_substitutes = torch.tensor(all_substitutes)  # [N, L]
    all_substitutes = all_substitutes[:24].to(mlm_model.device)  # cap at 24 candidates, on the model's device

    N, L = all_substitutes.size()
    # Fix: scoring only — no_grad avoids building a useless autograd graph
    with torch.no_grad():
        word_predictions = mlm_model(all_substitutes)[0]  # [N, L, vocab]
        ppl = c_loss(word_predictions.view(N * L, -1), all_substitutes.view(-1))  # [N*L]
        ppl = torch.exp(torch.mean(ppl.view(N, L), dim=-1))  # [N] avg perplexity per candidate

    # Sort candidates by perplexity, lowest (best) first
    _, order = torch.sort(ppl)
    ranked = [all_substitutes[i] for i in order]

    # Convert ranked token-ID sequences back to text
    final_words = []
    for seq in ranked:
        tokens = [tokenizer._convert_id_to_token(int(i)) for i in seq]
        final_words.append(tokenizer.convert_tokens_to_string(tokens))

    return final_words

if __name__ == "__main__":
    # Demonstration: generate an adversarial example that flips a fine-tuned
    # BERT sentiment classifier's prediction on an IMDB-style review.
    attacker = BertAttack()
    print("BERT attack model initialized successfully")

    # Example text to attack (IMDB movie review snippet)
    example_text = "This movie was absolutely fantastic! The acting was superb and the storyline was engaging from start to finish."

    print(f"\nOriginal text: {example_text}")

    # First, check what the target model predicts for the original text
    with torch.no_grad():
        inputs = attacker.tgt_tokenizer(example_text, return_tensors="pt", truncation=True, max_length=512).to(attacker.device)
        original_output = attacker.tgt_model(**inputs)
        original_prob = torch.softmax(original_output.logits, dim=-1)
        original_label = torch.argmax(original_prob, dim=-1).item()

    print(f"Original prediction: {'Positive' if original_label == 1 else 'Negative'} "
          f"(confidence: {original_prob[0][original_label].item():.3f})")

    # Attack to flip the prediction (0 = negative, 1 = positive)
    target_label = 1 - original_label
    print(f"Target label: {'Positive' if target_label == 1 else 'Negative'}")

    # Generate adversarial example.
    # BUG FIX: attack() returns (text, success_count); the original assigned the
    # whole tuple to adversarial_text and then tokenized the tuple below.
    print("\nGenerating adversarial example...")
    adversarial_text, num_substitutions = attacker.attack(example_text, target_label)

    # Verify whether the attack succeeded (inference only)
    with torch.no_grad():
        adv_inputs = attacker.tgt_tokenizer(adversarial_text, return_tensors="pt", truncation=True, max_length=512).to(attacker.device)
        adv_output = attacker.tgt_model(**adv_inputs)
        adv_prob = torch.softmax(adv_output.logits, dim=-1)
        adv_label = torch.argmax(adv_prob, dim=-1).item()

    print(f"\nAdversarial text: {adversarial_text}")
    print(f"Words substituted: {num_substitutions}")
    print(f"Adversarial prediction: {'Positive' if adv_label == 1 else 'Negative'} "
          f"(confidence: {adv_prob[0][adv_label].item():.3f})")

    if adv_label == target_label:
        print("✓ Attack successful! Model prediction flipped.")
    else:
        print("✗ Attack failed. Model prediction not changed.")