import sentencepiece as spm
import json
from tqdm import tqdm

class ASTTokenizer:
    """Maps AST node symbols to/from SentencePiece vocabulary ids.

    Wraps a trained SentencePiece model and exposes simple list-based
    encode/decode helpers plus explicit vocab lookup tables.
    """

    def __init__(self, model_path):
        self.sp = spm.SentencePieceProcessor()
        self.sp.Load(model_path)

        # Materialize the pieces once, then derive both lookup tables
        # (piece -> id and id -> piece) from the same list.
        pieces = [self.sp.IdToPiece(idx) for idx in range(self.sp.GetPieceSize())]
        self.vocab = {piece: idx for idx, piece in enumerate(pieces)}
        self.inverse_vocab = dict(enumerate(pieces))

    def encode_sequence(self, symbols):
        """Convert a symbol sequence into a vector of vocabulary ids."""
        to_id = self.sp.PieceToId
        return [to_id(sym) for sym in symbols]

    def decode_sequence(self, ids):
        """Convert a vector of vocabulary ids back into a symbol sequence."""
        table = self.inverse_vocab
        return [table.get(int(token_id), "<UNK>") for token_id in ids]

def process_dataset(input_file, output_file, tokenizer):
    """Encode an entire JSONL dataset with the given tokenizer.

    Each input line must be a JSON object with 'obfuscated' and 'original'
    symbol lists; the output is one JSON object per line with the same keys
    mapped to id vectors. Malformed lines are reported and skipped
    (best-effort processing), so one bad record never aborts the run.

    Args:
        input_file: Path to the source JSONL dataset.
        output_file: Path where the encoded JSONL dataset is written.
        tokenizer: Object providing encode_sequence(list[str]) -> list[int].
    """
    # Fix: open with an explicit encoding — without it, decoding depends on
    # the platform locale and non-ASCII dataset content can fail or corrupt.
    with open(input_file, 'r', encoding='utf-8') as fin, \
            open(output_file, 'w', encoding='utf-8') as fout:
        for line in tqdm(fin, desc="Processing"):
            try:
                item = json.loads(line)

                # Encode both symbol sequences into id vectors.
                new_item = {
                    "obfuscated": tokenizer.encode_sequence(item['obfuscated']),
                    "original": tokenizer.encode_sequence(item['original']),
                }

                fout.write(json.dumps(new_item) + '\n')
            except Exception as e:
                # Deliberate broad catch: skip any bad record and keep going.
                # Fix: include the exception type so failures are diagnosable
                # (str(e) alone is often empty, e.g. for KeyError-like cases).
                print(f"Error processing line: {type(e).__name__}: {e}")
                continue

if __name__ == "__main__":
    import argparse

    # Build the CLI argument parser.
    arg_parser = argparse.ArgumentParser(description='AST符号处理工具')
    arg_parser.add_argument('--model_path', required=True, help='SentencePiece模型路径')
    arg_parser.add_argument('--input_file', required=True, help='原始数据集路径')
    arg_parser.add_argument('--output_file', required=True, help='处理后的输出路径')
    cli_args = arg_parser.parse_args()

    # Set up the tokenizer and run the dataset through it.
    ast_tokenizer = ASTTokenizer(cli_args.model_path)
    process_dataset(cli_args.input_file, cli_args.output_file, ast_tokenizer)

    # Sanity check: round-trip a few known symbols through the tokenizer.
    print("\n验证示例:")
    sample = ["Identifier", "FunctionCall", "VariableDeclaration"]
    sample_ids = ast_tokenizer.encode_sequence(sample)
    round_trip = ast_tokenizer.decode_sequence(sample_ids)

    print(f"原始符号: {sample}")
    print(f"编码结果: {sample_ids}")
    print(f"解码结果: {round_trip}")