# -*- coding: utf-8 -*-
"""
@author: einsam
"""

import json
import re
import pickle, gzip
import random
import numpy
import copy
import os
from transformers import AutoTokenizer
from typing import List, Dict, Any, Optional, Tuple


class Dataset(object):
    """Container for tokenized code samples with shuffled epoch/batch iteration.

    Holds token-id sequences (``xs``), integer labels (``ys``), raw token
    lists (``raws``), per-sample ids (``ids``) and a mapping from sample
    position to the original source-code string (``original_codes``).
    """

    def __init__(self, xs: Optional[List[List[int]]] = None, ys: Optional[List[int]] = None,
                 raws: Optional[List[List[str]]] = None,
                 ids=None, idx2txt=None, txt2idx=None,
                 vocab_size=5000, dtype=None, original_codes=None):
        """Build the dataset.

        Args:
            xs: token-id sequences, one list of ints per sample.
            ys: integer labels, parallel to ``xs``.
            raws: optional raw token lists, parallel to ``xs``.
            ids: optional per-sample ids; defaults to 0..len(xs)-1.
            idx2txt / txt2idx: vocabulary mappings, stored as given.
            vocab_size: vocabulary size (stored, not validated here).
            dtype: dict with 'int' and 'fp' numpy dtypes; defaults to
                int32/float32 (the original crashed when dtype was None).
            original_codes: mapping from sample position to the original
                source string.
        """
        # Fix: the original signature used mutable defaults ([] / {}),
        # which are shared across calls; use None sentinels instead.
        xs = [] if xs is None else xs
        ys = [] if ys is None else ys
        idx2txt = [] if idx2txt is None else idx2txt
        txt2idx = {} if txt2idx is None else txt2idx
        if dtype is None:
            dtype = {'fp': numpy.float32, 'int': numpy.int32}

        self.__dtype = dtype
        self.__vocab_size = vocab_size
        self.__idx2txt = idx2txt
        self.__txt2idx = txt2idx
        self.__original_codes = original_codes if original_codes is not None else {}

        if raws is None:
            assert len(xs) == len(ys)
            raws = [None for _ in ys]
        else:
            assert len(xs) == len(ys) and len(ys) == len(raws)
        if ids is None:
            ids = list(range(len(xs)))
        else:
            assert len(xs) == len(ids)
        self.__size = len(xs)

        # Shallow-copy the outer lists only; the inner token lists are shared
        # with the caller to avoid deep-copying large inputs.
        self.__xs = list(xs)
        self.__raws = list(raws)
        self.__ys = numpy.asarray(ys, dtype=self.__dtype['int'])
        self.__ids = numpy.asarray(ids, dtype=self.__dtype['int'])

        self.__epoch = None
        self.reset_epoch()

    def reset_epoch(self):
        """Start a new epoch: draw a fresh random permutation of all positions."""
        self.__epoch = random.sample(range(self.__size), self.__size)

    def next_batch(self, batch_size=32):
        """Return the next shuffled batch as a dict.

        Keys: 'x' (token-id lists), 'y' / 'id' (numpy arrays), 'raw'
        (raw token lists or None), 'original_code' (source strings keyed by
        sample position, '' when missing) and 'new_epoch' (True when the
        permutation was re-drawn because fewer than batch_size samples
        remained).

        Raises:
            ValueError: if ``batch_size`` exceeds the dataset size.
        """
        if batch_size > self.__size:
            raise ValueError(
                f"batch size {batch_size} exceeds dataset size {self.__size}")
        batch = {"x": [], "y": [], "raw": [], "id": [], "new_epoch": False, "original_code": []}
        if len(self.__epoch) < batch_size:
            batch['new_epoch'] = True
            self.reset_epoch()
            # After a reset the epoch holds all samples, and batch_size was
            # checked above, so no further size check is needed.

        idxs = self.__epoch[:batch_size]
        self.__epoch = self.__epoch[batch_size:]

        # Copy the outer lists so callers may mutate the batch freely; the
        # elements (ints / strings) are immutable, so no deep copy is needed.
        batch['x'] = [list(self.__xs[i]) for i in idxs]
        batch['y'] = numpy.take(self.__ys, indices=idxs, axis=0)
        batch['id'] = numpy.take(self.__ids, indices=idxs, axis=0)
        batch['raw'] = [None if self.__raws[i] is None else list(self.__raws[i]) for i in idxs]
        batch['original_code'] = [self.__original_codes.get(i, "") for i in idxs]
        return batch

    def get_size(self):
        """Total number of samples."""
        return self.__size

    def get_rest_epoch_size(self):
        """Number of samples not yet served in the current epoch."""
        return len(self.__epoch)

    def get_original_code(self, index):
        """Original source string for the sample at ``index`` ('' if unknown)."""
        return self.__original_codes.get(index, "")


class VulDataset(object):
    """Vulnerability dataset loader for JSONL files (one JSON object per line).

    Each record is expected to carry a 'func' source string, a 'target'
    label and optionally an 'idx' id. Splits are exposed as ``train``,
    ``dev`` and ``test`` Dataset instances (or None when not loaded).
    """

    def __init__(self, train_path=None, valid_path=None, test_path=None,
                 vocab_size=-1, dtype='32', tokenizer=None):
        """Load the requested splits.

        Args:
            train_path / valid_path / test_path: optional JSONL file paths.
            vocab_size: ignored; the tokenizer's vocab size is used instead.
            dtype: '16' / '32' / '64' numeric-precision selector.
            tokenizer: a HuggingFace-style tokenizer; required.

        Raises:
            ValueError: if no tokenizer is supplied or ``dtype`` is invalid.
        """
        if tokenizer is None:
            # Fail fast with a clear message instead of an AttributeError below.
            raise ValueError("a tokenizer is required to build a VulDataset")

        self.__dtypes = self.__dtype(dtype)
        self.tokenizer = tokenizer
        self.__vocab_size = self.tokenizer.vocab_size
        # NOTE(review): decoding every id individually is O(vocab) tokenizer
        # calls; acceptable once at init time but slow for large vocabularies.
        self.__idx2txt = {idx: tokenizer.decode([idx]) for idx in range(tokenizer.vocab_size)}
        self.__txt2idx = tokenizer.get_vocab()

        # Global id -> original-code mapping accumulated over all splits.
        self.original_codes = {}

        self.train = self.__load_dataset(train_path) if train_path else None
        self.dev = self.__load_dataset(valid_path) if valid_path else None
        self.test = self.__load_dataset(test_path) if test_path else None

    def __load_dataset(self, path):
        """Parse a JSONL file and return a Dataset, or None on failure."""
        if not path:
            return None

        try:
            data = []
            with open(path, 'r', encoding='utf-8') as f:
                # Stream line by line and skip blank lines — the original
                # readlines()+json.loads crashed on empty/trailing lines.
                for line in f:
                    line = line.strip()
                    if line:
                        data.append(json.loads(line))
        except Exception as e:
            print(f"Error loading dataset from {path}: {e}")
            return None

        xs, ys, raws, ids = [], [], [], []
        file_original_codes = {}

        for i, d in enumerate(data):
            try:
                func = d.get('func', '')
                target = d.get('target', 0)
                idx = d.get('idx', i)  # fall back to the record position

                # Keep a whitespace-normalized copy of the source, keyed by
                # position (used by Dataset.next_batch) and by sample id.
                file_original_codes[i] = re.sub(r'\s+', ' ', func).strip()
                self.original_codes[idx] = file_original_codes[i]

                # Lightweight lexer: identifiers, integer literals, or any
                # single non-space character.
                tokens = re.findall(r'[a-zA-Z_][a-zA-Z0-9_]*|[0-9]+|\S', func)
                raws.append(tokens)

                # Map each lexer token to exactly one tokenizer id;
                # multi-piece tokens collapse to the unk id.
                token_ids = []
                for token in tokens:
                    encoded = self.tokenizer.encode(" " + token, add_special_tokens=False)
                    if len(encoded) == 1:
                        token_ids.append(encoded[0])
                    else:
                        token_ids.append(self.tokenizer.unk_token_id)

                xs.append(token_ids)
                ys.append(target)
                ids.append(idx)

            except Exception as e:
                # Best-effort loading: report and skip malformed records.
                print(f"Error processing sample {i}: {e}")
                continue

        return Dataset(
            xs=xs, ys=ys, raws=raws, ids=ids,
            idx2txt=self.__idx2txt,
            txt2idx=self.__txt2idx,
            vocab_size=self.__vocab_size,
            dtype=self.__dtypes,
            original_codes=file_original_codes
        )

    def get_original_code(self, index):
        """Original source string for sample id ``index`` ('' if unknown)."""
        return self.original_codes.get(index, "")

    def __dtype(self, dtype='32'):
        """Map a precision string to {'fp': ..., 'int': ...} numpy dtypes."""
        table = {
            '16': {'fp': numpy.float16, 'int': numpy.int16},
            '32': {'fp': numpy.float32, 'int': numpy.int32},
            '64': {'fp': numpy.float64, 'int': numpy.int64},
        }
        if dtype not in table:
            # Explicit error instead of an assert (stripped under -O) that
            # silently fell through to returning None.
            raise ValueError(f"dtype must be one of {sorted(table)}, got {dtype!r}")
        return table[dtype]

    def get_dtype(self):
        """The numpy dtype dict selected at construction."""
        return self.__dtypes

    def get_vocab_size(self):
        """Tokenizer vocabulary size."""
        return self.__vocab_size

    def get_idx2txt(self):
        """Deep copy of the id -> token-text mapping."""
        return copy.deepcopy(self.__idx2txt)

    def get_txt2idx(self):
        """Deep copy of the token-text -> id mapping."""
        return copy.deepcopy(self.__txt2idx)

    def vocab2idx(self, vocab):
        """Id for ``vocab``, or the '<unk>' id when out of vocabulary."""
        if vocab in self.__txt2idx:
            return self.__txt2idx[vocab]
        return self.__txt2idx['<unk>']

    def idx2vocab(self, idx):
        """Token text for ``idx``, or '<unk>' when out of range."""
        # __idx2txt is keyed 0..vocab_size-1, so .get() is equivalent to the
        # original range check + index.
        return self.__idx2txt.get(idx, '<unk>')


if __name__ == "__main__":
    import time

    # Time the dataset build, then a single batch fetch.
    t0 = time.time()

    # The tokenizer must match the one used by the downstream model.
    tokenizer = AutoTokenizer.from_pretrained("microsoft/codebert-base")

    # Build only the test split from the JSONL file.
    dataset = VulDataset(test_path="../data/vul_data.jsonl", tokenizer=tokenizer)

    print("time cost = " + str(time.time() - t0) + " sec")

    t0 = time.time()
    batch = dataset.test.next_batch(2)
    print("time cost = " + str(time.time() - t0) + " sec")

    # Show each sample's token ids alongside its original source string.
    for pos, token_ids in enumerate(batch['x']):
        print(token_ids)
        print("Original code:", batch['original_code'][pos])