import ast
import json
import os
import re

import torch
from torch.cuda.amp import autocast
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# 设置模型路径和量化配置
model_name = "Qwen/Qwen2.5-Coder-3B-Instruct"
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16
)

# 加载模型
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='left')
model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config).to(device)

# 读取文件夹中的所有代码文本
def read_code_files(folder_path):
    code_texts = []
    for filename in os.listdir(folder_path):
        if filename.endswith(".txt"):
            file_path = os.path.join(folder_path, filename)
            with open(file_path, 'r', encoding='utf-8') as file:
                code_texts.append(file.read())
    return code_texts

# 提取有关JSON的部分
def extract_json_from_response(response):
    # 使用正则表达式查找 JSON 格式的文本
    json_pattern = re.compile(r'\{.*?\}', re.DOTALL)
    matches = json_pattern.findall(response)
    
    # 尝试解析第一个匹配的 JSON 字符串
    for match in matches:
        try:
            json_response = json.loads(match)
            # 检查是否符合预期格式
            if all(key in json_response for key in ['encryption', 'decryption', 'signature', 'verification', 'algorithm']):
                return json_response
        except json.JSONDecodeError:
            continue
    
    # 如果没有找到有效的 JSON，返回默认格式
    return {"encryption": 0, "decryption": 0, "signature": 0, "verification": 0, 'algorithm': ""}

# 使用模型分析代码文本并返回JSON格式的结果
def analyze_code_batch(code_texts, tokenizer, model, device, batch_size=4):
    results = []
    for i in range(0, len(code_texts), batch_size):
        batch_texts = code_texts[i:i + batch_size]
        prompts = [
            (
                f"分析以下代码，判断其中是否包含加密、解密、签名和验签操作，并识别使用的算法。\n"
                f"结果需以JSON格式返回，具体要求如下：\n"
                f"1. 加密、解密、签名和验签的结果用0或1表示。\n"
                f"2. 算法识别结果用一句话描述。\n"
                f"JSON格式如下：{{'encryption': 0/1, 'decryption': 0/1, 'signature': 0/1, 'verification': 0/1, 'algorithm': '使用的算法'}}\n"
                f"代码：\n{code_text}\n"
                f"答案："
            )
            for code_text in batch_texts
        ]
        inputs = tokenizer(prompts, return_tensors="pt", truncation=True, padding='max_length', max_length=1024).to(device)
        with torch.no_grad(), autocast():
            print(f"analyzing code_text_{i}")
            outputs = model.generate(**inputs, max_new_tokens=512, num_return_sequences=1)
        
        # 提取有效部分
        for j in range(len(batch_texts)):
            response = tokenizer.decode(outputs[j], skip_special_tokens=True)
            json_response = extract_json_from_response(response)
            
            # 确保每个算法列表中的元素用逗号隔开
            for key in json_response:
                if isinstance(json_response[key], list):
                    json_response[key] = [item.strip() for item in ','.join(json_response[key]).split(',')]
            
            results.append(json_response)
    
    return results

# 处理所有代码文本并保存结果
def process_and_save_results(folder_path, output_folder):
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    
    code_texts = read_code_files(folder_path)
    results = analyze_code_batch(code_texts, tokenizer, model, device, batch_size=4)
    
    for i, result in enumerate(results):
        output_file_path = os.path.join(output_folder, f'output_{i}.json')
        with open(output_file_path, 'w', encoding='utf-8') as file:
            json.dump(result, file, ensure_ascii=False, indent=4)
        print(f"文件 {i} 的分析结果已保存到 {output_file_path}")

# 主函数入口
if __name__ == "__main__":
    code_folder = "segmentation/code_texts"
    output_folder = "output"
    process_and_save_results(code_folder, output_folder)
    print(f"所有分析结果已保存到 {output_folder} 文件夹")