# -*- coding: utf-8 -*-
"""
CodeBERT sequence-classification wrapper.

@author: einsam

Modified for improved performance, robustness and functionality.

Changes:
1. Added device auto-detection and handling
2. Implemented embedding caching for performance
3. Added mixed-precision training support
4. Enhanced tokenization with detailed error handling
5. Added gradient clipping for stability
6. Integrated logging for better monitoring
7. Added type hints throughout
8. Improved docstrings for all methods
"""

from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
from typing import List, Optional, Dict, Tuple, Union

import torch
from torch import nn
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    PreTrainedTokenizer,
    PreTrainedModel,
    RobertaConfig
)
from torch.cuda.amp import autocast

# Configure logging.
# NOTE: basicConfig mutates the root logger; it only takes effect if no
# handlers have been configured yet, so importing this module alongside an
# application that configures logging first is harmless.
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.INFO
)


class CodeBERTClassifier(nn.Module):
    """
    CodeBERT-based classifier for sequence classification tasks.

    Wraps a Hugging Face sequence-classification model and its tokenizer,
    adding helpers used by the surrounding attack/analysis code:

    * unified two-logit output for single-logit binary models,
    * embedding lookup with a bounded cache,
    * gradient of the loss with respect to the embedding matrix.

    Args:
        model_path (str): Path to pre-trained weights (a directory containing
            ``model.bin``, or the weights file itself) or a Hugging Face model
            identifier. Also used to load the tokenizer.
        config: Model configuration object (e.g. ``RobertaConfig``) used to
            build the architecture before the state dict is loaded.
        num_labels (int): Number of classification labels. ``1`` denotes a
            single-logit binary classifier whose outputs are expanded to two
            pseudo-logits.
        device (torch.device, optional): Device to run the model on.
            Automatically selects CUDA when available if ``None``.
    """

    # Soft cap on the embedding cache: once reached, the cache is cleared so
    # it cannot grow without bound during long attack/evaluation runs.
    _EMBED_CACHE_LIMIT = 4096

    def __init__(self, model_path: str, config, num_labels: int,
                 device: Optional[torch.device] = None):
        super().__init__()

        # Device handling: prefer CUDA when available.
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.device = device
        logger.info(f"Using device: {self.device}")

        try:
            # Load the tokenizer.
            self.tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained(model_path)

            # Build the (randomly initialized) architecture from the config;
            # the fine-tuned weights are loaded manually below.
            self.model: PreTrainedModel = AutoModelForSequenceClassification.from_config(config)

            # Locate the weights: either <model_path>/model.bin, or
            # model_path itself is the weights file.
            model_file = os.path.join(model_path, "model.bin")
            if not os.path.exists(model_file):
                model_file = model_path

            if not os.path.exists(model_file):
                raise FileNotFoundError(f"Model file not found: {model_file}")

            logger.info(f"Loading model weights from: {model_file}")

            state_dict = torch.load(model_file, map_location=device)

            # Some checkpoints wrap the model and prefix every key with
            # "encoder."; strip that prefix so keys match this architecture.
            prefix = "encoder."
            new_state_dict = {}
            for key, value in state_dict.items():
                if key.startswith(prefix):
                    new_key = key[len(prefix):]
                    logger.debug(f"Renaming key: {key} -> {new_key}")
                    new_state_dict[new_key] = value
                else:
                    new_state_dict[key] = value

            self.model.load_state_dict(new_state_dict)
            self.model = self.model.to(self.device)

        except Exception as e:
            logger.error(f"Failed to load model/tokenizer from {model_path}: {e}")
            raise

        # Resolve the word-embedding layer for the known backbones, falling
        # back to the generic accessor for anything else.
        try:
            if hasattr(self.model, 'roberta'):
                self.embed = self.model.roberta.embeddings.word_embeddings
            elif hasattr(self.model, 'bert'):
                self.embed = self.model.bert.embeddings.word_embeddings
            elif hasattr(self.model, 'albert'):
                self.embed = self.model.albert.embeddings.word_embeddings
            else:
                self.embed = self.model.get_input_embeddings()
        except AttributeError:
            logger.warning("Couldn't find specific embedding layer, using generic method")
            self.embed = self.model.get_input_embeddings()

        # Model properties.
        self.vocab_size = self.embed.weight.size(0)
        self.x_size = self.embed.weight.size(-1)
        # Tokenizers with no configured limit report a huge sentinel value
        # (~1e30) for model_max_length; fall back to 512 in that case.
        max_len = self.tokenizer.model_max_length or 512
        self.block_size = max_len if max_len <= 10 ** 6 else 512
        self.num_labels = num_labels

        # Embedding cache, keyed by the exact flattened ID sequence.
        self.__embed_cache: Dict[Tuple, torch.Tensor] = {}

        logger.info(f"Model initialized with vocab_size={self.vocab_size}, block_size={self.block_size}")

    def _process_logits(self, logits: torch.Tensor) -> torch.Tensor:
        """Normalize single-logit binary outputs to a two-logit format.

        Single-logit binary models (``num_labels == 1``) emit shape
        ``[batch, 1]``; convert to pseudo-logits ``[log(1-p), log(p)]`` of
        shape ``[batch, 2]`` so downstream code can always assume two
        classes. Multi-class logits pass through unchanged.
        """
        if self.num_labels == 1 and logits.shape[-1] == 1:
            probs = torch.sigmoid(logits)
            # Clamp avoids log(0) when the probability saturates.
            class0_logits = torch.log(torch.clamp(1 - probs, min=1e-10))
            class1_logits = torch.log(torch.clamp(probs, min=1e-10))
            return torch.cat([class0_logits, class1_logits], dim=-1)
        return logits

    def tokenize(self, inputs: Union[str, List[str], List[List[str]]],
                 cut_and_pad: bool = False,
                 ret_id: bool = False) -> Union[List[List[int]], List[List[str]]]:
        """
        Tokenize input strings or token lists.

        Args:
            inputs: A single string, a list of strings, or a list of
                pre-tokenized token lists.
            cut_and_pad: Whether to pad to max length (and truncate).
            ret_id: Return token IDs instead of token strings.

        Returns:
            One entry per input sentence: a 1-D tensor of token IDs when
            ``ret_id`` is True, otherwise a list of token strings with
            padding tokens removed.

        Raises:
            Exception: Re-raises any tokenizer failure after logging it.
        """
        try:
            rets = []
            if isinstance(inputs, str):
                # A bare string would otherwise be iterated character by
                # character; treat it as a batch of one.
                inputs = [inputs]
            elif isinstance(inputs, list) and all(isinstance(item, list) for item in inputs):
                # Pre-tokenized input: re-join so the HF tokenizer can
                # re-tokenize it consistently.
                inputs = [" ".join(tokens) for tokens in inputs]

            for sent in inputs:
                encoding = self.tokenizer(
                    sent,
                    max_length=self.block_size,
                    padding='max_length' if cut_and_pad else False,
                    truncation=True,
                    return_tensors='pt'
                )

                if ret_id:
                    rets.append(encoding.input_ids.squeeze(0))
                else:
                    tokens = self.tokenizer.convert_ids_to_tokens(encoding.input_ids[0])
                    # Drop padding tokens from the readable form.
                    rets.append([t for t in tokens if t != self.tokenizer.pad_token])

            return rets
        except Exception as e:
            logger.error(f"Tokenization failed: {e}")
            raise

    def convert_to_two_class(self, outputs: torch.Tensor) -> torch.Tensor:
        """
        Convert single-output binary results to a two-output format.

        Used to adapt single-logit models to attack code that expects two
        logits per example.

        Args:
            outputs: Raw logits of shape ``[batch, 1]``.

        Returns:
            Pseudo-logits ``[negative-class, positive-class]`` of shape
            ``[batch, 2]``.
        """
        # Sigmoid yields the positive-class probability.
        probs = torch.sigmoid(outputs)
        # Build pseudo-logits; clamp avoids log(0).
        class0_logits = torch.log(torch.clamp(1 - probs, min=1e-10))
        class1_logits = torch.log(torch.clamp(probs, min=1e-10))
        return torch.cat([class0_logits, class1_logits], dim=1)

    def run_batch(self, inputs_src: Union[List[str], List[List[str]]],
                  labels: Optional[List[int]] = None) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """
        Run the model on a batch of inputs.

        Args:
            inputs_src: List of input strings, token-string lists, or
                token-ID lists (integers).
            labels: Optional list of target labels.

        Returns:
            Logits of shape ``[batch, 2]`` (single-logit binary models are
            expanded to two pseudo-logits) or raw multi-class logits; a
            ``(logits, loss)`` tuple when labels are provided. On failure
            the error is logged and zero logits (and zero loss) sized to
            the batch are returned so attack loops can continue.
        """
        try:
            # Case 1: list of raw strings.
            if isinstance(inputs_src[0], str):
                inputs = self.tokenizer(
                    inputs_src,
                    padding=True,
                    truncation=True,
                    max_length=self.block_size,
                    return_tensors='pt'
                )

            # Case 2: list of token-ID lists (integers).
            elif isinstance(inputs_src[0], list) and isinstance(inputs_src[0][0], int):
                input_ids = inputs_src
                attention_mask = [[1] * len(ids) for ids in input_ids]

                # Pad/truncate every row to the longest (capped) length.
                max_length = min(self.block_size, max(len(ids) for ids in input_ids))
                padded_input_ids = []
                padded_attention_mask = []

                for ids, mask in zip(input_ids, attention_mask):
                    ids = ids[:max_length]
                    mask = mask[:max_length]
                    padded_input_ids.append(ids + [self.tokenizer.pad_token_id] * (max_length - len(ids)))
                    padded_attention_mask.append(mask + [0] * (max_length - len(mask)))

                inputs = {
                    'input_ids': torch.tensor(padded_input_ids),
                    'attention_mask': torch.tensor(padded_attention_mask)
                }

            # Case 3: list of token-string lists.
            elif isinstance(inputs_src[0], list) and isinstance(inputs_src[0][0], str):
                inputs_src = [" ".join(tokens) for tokens in inputs_src]
                inputs = self.tokenizer(
                    inputs_src,
                    padding=True,
                    truncation=True,
                    max_length=self.block_size,
                    return_tensors='pt'
                )

            else:
                raise TypeError(f"Unsupported input type: {type(inputs_src[0])}")

            # Move data to the device.
            inputs = {k: v.to(self.device) for k, v in inputs.items()}

            # Mixed precision only on CUDA.
            with autocast(enabled=(self.device.type == 'cuda')):
                if labels is not None:
                    if self.num_labels == 1:  # single-logit binary classifier
                        labels_tensor = torch.tensor(labels, dtype=torch.float).to(self.device)
                        outputs = self.model(**inputs)

                        # Model may return a bare tensor or an output object.
                        logits = outputs.logits if hasattr(outputs, 'logits') else outputs
                        processed_logits = self._process_logits(logits)
                        loss = nn.BCEWithLogitsLoss()(logits.squeeze(-1), labels_tensor)
                        return processed_logits, loss
                    else:  # multi-class: let the model compute the loss
                        labels_tensor = torch.tensor(labels, dtype=torch.long).to(self.device)
                        outputs = self.model(**inputs, labels=labels_tensor)

                        logits = outputs.logits if hasattr(outputs, 'logits') else outputs
                        return self._process_logits(logits), outputs.loss
                else:
                    outputs = self.model(**inputs)
                    logits = outputs.logits if hasattr(outputs, 'logits') else outputs
                    return self._process_logits(logits)

        except Exception as e:
            logger.error(f"Batch processing failed: {e}")
            # Deliberate best-effort fallback, sized to the batch so callers
            # that index per-example results keep working.
            try:
                batch = max(len(inputs_src), 1)
            except TypeError:
                batch = 1
            default_logits = torch.zeros(batch, 2).to(self.device)
            if labels is not None:
                return default_logits, torch.tensor(0.0).to(self.device)
            return default_logits

    def forward(self, inputs: Union[List[str], List[List[str]]],
                labels: Optional[List[int]] = None) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """
        Forward pass of the model (delegates to :meth:`run_batch`).

        Args:
            inputs: List of input strings or token lists.
            labels: Optional list of target labels.

        Returns:
            Logits, or ``(logits, loss)`` if labels are provided.
        """
        if labels is not None:
            return self.run_batch(inputs, labels)
        return self.run_batch(inputs)

    def prob(self, inputs: Union[List[str], List[List[str]]]) -> torch.Tensor:
        """
        Compute class probabilities for inputs.

        Args:
            inputs: List of input strings or token lists.

        Returns:
            Tensor of class probabilities (softmax over the last dim).
        """
        logits = self.forward(inputs)
        return nn.Softmax(dim=-1)(logits)

    def get_embedding(self, input_ids: torch.Tensor) -> torch.Tensor:
        """
        Get embeddings for input IDs, memoized per exact ID sequence.

        NOTE: cached tensors carry the autograd graph of the call that
        created them; do not reuse them across ``backward()`` calls
        (``grad`` therefore computes embeddings fresh).

        Args:
            input_ids: Tensor of input IDs.

        Returns:
            Embedding tensor of shape ``input_ids.shape + (embed_dim,)``.
        """
        # Cache key is the exact flattened ID sequence.
        cache_key = tuple(input_ids.detach().cpu().flatten().tolist())

        cached = self.__embed_cache.get(cache_key)
        if cached is not None:
            return cached

        # Keep the cache bounded: drop everything once the limit is hit.
        if len(self.__embed_cache) >= self._EMBED_CACHE_LIMIT:
            self.__embed_cache.clear()

        embeds = self.embed(input_ids)
        self.__embed_cache[cache_key] = embeds
        return embeds

    def grad(self, inputs: Union[List[str], List[List[str]]],
             labels: List[int]) -> torch.Tensor:
        """
        Compute the gradient of the loss w.r.t. the embedding matrix.

        Args:
            inputs: List of input strings, token-string lists, or token-ID
                lists (integers).
            labels: List of target labels.

        Returns:
            Gradient tensor shaped like ``embed.weight``; all-zeros on
            failure (logged).
        """
        try:
            self.zero_grad()
            self.embed.weight.retain_grad()

            # Case 1: token-ID lists. Checked FIRST — a generic list check
            # would otherwise shadow this branch and feed raw ID lists to
            # the tokenizer.
            if (isinstance(inputs[0], list) and inputs[0]
                    and isinstance(inputs[0][0], int)):
                attention_mask = [[1] * len(ids) for ids in inputs]
                # Pad/truncate every row to the longest (capped) length.
                max_length = min(self.block_size, max(len(ids) for ids in inputs))
                padded_input_ids = []
                padded_attention_mask = []

                for ids, mask in zip(inputs, attention_mask):
                    ids = ids[:max_length]
                    mask = mask[:max_length]
                    padded_input_ids.append(ids + [self.tokenizer.pad_token_id] * (max_length - len(ids)))
                    padded_attention_mask.append(mask + [0] * (max_length - len(mask)))

                input_ids = torch.tensor(padded_input_ids).to(self.device)
                attention_mask = torch.tensor(padded_attention_mask).to(self.device)

            # Case 2: raw strings or token-string lists.
            elif isinstance(inputs[0], (str, list)):
                if isinstance(inputs[0], list):
                    # Token strings: re-join so the tokenizer accepts them.
                    inputs = [" ".join(tokens) for tokens in inputs]
                inputs_tok = self.tokenizer(
                    inputs,
                    padding=True,
                    truncation=True,
                    max_length=self.block_size,
                    return_tensors='pt'
                )
                input_ids = inputs_tok['input_ids'].to(self.device)
                attention_mask = inputs_tok['attention_mask'].to(self.device)

            else:
                raise TypeError(f"Unsupported input type: {type(inputs[0])}")

            # Compute embeddings fresh (no cache): a cached tensor's autograd
            # graph is freed after its first backward, so reusing it here
            # would make a second call to grad() fail.
            inputs_embeds = self.embed(input_ids)

            outputs = self.model(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask
            )

            # Model may return a bare tensor or an output object.
            logits = outputs.logits if hasattr(outputs, 'logits') else outputs

            if self.num_labels == 1:  # single-logit binary classifier
                labels_tensor = torch.tensor(labels, dtype=torch.float).to(self.device)
                loss = nn.BCEWithLogitsLoss()(logits.squeeze(-1), labels_tensor)
            else:  # multi-class
                labels_tensor = torch.tensor(labels, dtype=torch.long).to(self.device)
                loss = nn.CrossEntropyLoss()(logits, labels_tensor)

            loss.backward()
            # Gradient clipping for stability of downstream consumers.
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
            return self.embed.weight.grad

        except Exception as e:
            logger.error(f"Gradient computation failed: {e}")
            # weight.grad is None when backward never ran; fall back to the
            # weight's own shape so callers always receive a tensor.
            if self.embed.weight.grad is not None:
                return torch.zeros_like(self.embed.weight.grad)
            return torch.zeros_like(self.embed.weight)


if __name__ == "__main__":
    # Originally intended as a smoke test, but ultimately never used.

    # Example usage
    from dataset import VulDataset
    from torch import optim

    # Initialize tokenizer from a local CodeBERT checkpoint.
    tokenizer = AutoTokenizer.from_pretrained("../base_models/microsoft/codebert-base")
    config_name = '../base_models/microsoft/codebert-base'
    cache_dir = ""
    # Load dataset.
    # NOTE(review): VulDataset is project-local; from the usage below it
    # presumably exposes .train.next_batch(n) returning a dict with 'raw'
    # and 'y' keys — confirm against dataset.py.
    vul_dataset = VulDataset(
        train_path="../preprocess/dataset/test_subs_0_10.jsonl",
        tokenizer=tokenizer
    )

    # Initialize model.
    # NOTE: this example is largely unused; adapted from the CARROT source,
    # but not exercised after the modifications.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    config = RobertaConfig.from_pretrained(config_name if config_name else tokenizer,
                                           cache_dir=cache_dir if cache_dir else None)
    config.num_labels = 1  # single label (one-logit binary classification)

    model = CodeBERTClassifier(
        model_path='../base_models/microsoft/codebert-base',
        config=config,
        num_labels=config.num_labels,
        device=device
    ).train()

    # Set up optimizer
    optimizer = optim.Adam(model.parameters(), lr=1e-5)
    optimizer.zero_grad()

    # Get one small batch from the training split.
    b = vul_dataset.train.next_batch(3)

    # Forward and backward pass, then a single optimizer step.
    logits, loss = model.run_batch(b['raw'], b['y'])
    loss.backward()
    optimizer.step()

    # Test gradient calculation against the embedding matrix.
    grad = model.grad(b['raw'], b['y'])
    print(f"Gradient shape: {grad.shape}")