import os
from pathlib import Path
import json
import pandas as pd
from difflib import SequenceMatcher
from utils import TrafficUtils, CSVUtils
from Washlog import WashLog
import Levenshtein 
import csv
# Single WashLog instance shared by all per-folder processing below.
wash_log = WashLog()

# Similarity score based on normalized Levenshtein edit distance.
def similarity(a, b):
    """Return a similarity score in [0, 1] for strings *a* and *b*.

    1.0 means identical, 0.0 means completely different. Two empty
    strings are considered identical.
    """
    longest = max(len(a), len(b))
    if longest == 0:
        # BUG FIX: original divided by zero when both strings were empty.
        return 1.0
    return 1 - (Levenshtein.distance(a, b) / longest)

# Walk every per-sample MD5 folder under the dataset root and, for each one:
#   1. parse the strace log into a CSV of syscall entries,
#   2. split the pcap and extract burst features from each piece,
#   3. convert the .brust file into a payload/burst JSON,
#   4. annotate the CSV with the burst matching each payload content,
#   5. remove the leftover .gexf graph file.

def _write_strace_csv(strace_path, csv_path):
    """Parse *strace_path* with wash_log and dump the entries to *csv_path*."""
    # parse_logs also returns per-IP content; its JSON dump is currently disabled.
    fs, _ip_content_list = wash_log.parse_logs(str(strace_path))
    with open(csv_path, mode='w', newline='') as csv_f:
        writer = csv.writer(csv_f)
        writer.writerow(["PID", "Function", "Params", "Content", "Result"])
        for entry in fs:
            writer.writerow([entry[0], entry[1], entry[2], entry[3], entry[4]])


def _extract_bursts(traffic, base_path, pcap_path, keep_name):
    """Split *pcap_path*, run burst extraction on every resulting pcap,
    then delete the split pcaps while keeping the original capture *keep_name*."""
    traffic.split_cap(str(base_path), str(pcap_path))
    for piece in base_path.rglob("*.pcap"):
        traffic.get_burst_feature(str(piece))
    # Materialize the listing before unlinking so we never delete files
    # out from under a live rglob generator.
    for piece in list(base_path.rglob("*.pcap")):
        if piece.name != keep_name:
            piece.unlink()


def _burst_to_json(traffic, brust_path, json_path):
    """Group the .brust file into {payload, brust} records and save them as JSON.

    Hex-formatted lines accumulate into the current burst string; any other
    non-blank line is a payload line; a blank line terminates the record.
    """
    records = []
    payload_lines = []
    hex_burst = ''
    with open(brust_path, 'r') as brust_f:
        for line in brust_f:
            if traffic.is_hex_format(line):
                hex_burst += line.strip() + ' '
            elif line.strip() == '':
                records.append({"payload": payload_lines, "brust": hex_burst})
                payload_lines = []
                hex_burst = ''
            else:
                payload_lines.append(line)
    # BUG FIX: the original dropped the final record when the file did not
    # end with a trailing blank line.
    if payload_lines or hex_burst:
        records.append({"payload": payload_lines, "brust": hex_burst})
    wash_log.save_ip_content_to_json(json_path, records)


def _annotate_csv_with_bursts(json_path, csv_path, threshold=0.90):
    """Add a Matched_Brust column to *csv_path*.

    For each row whose Content is non-null, find the first burst in
    *json_path* with a payload at least *threshold* similar to it
    (0.90 — the old inline comment incorrectly claimed 95%).
    """
    with open(json_path, 'r') as json_f:
        data = json.load(json_f)

    df = pd.read_csv(csv_path)
    df['Matched_Brust'] = None  # initialize the new column

    for index, row in df[df['Content'].notna()].iterrows():
        # BUG FIX: pandas may parse Content as a non-string (e.g. a number),
        # which has no .strip(); coerce to str first.
        content = str(row['Content']).strip()
        matched = None
        for entry in data:
            for payload in entry['payload']:
                if similarity(content, payload.strip()) >= threshold:
                    matched = entry['brust']
                    break  # stop scanning this record's payloads
            if matched:
                break  # stop scanning records
        if matched:
            df.at[index, 'Matched_Brust'] = matched

    # Overwrite the CSV in place with the annotated version.
    df.to_csv(csv_path, index=False)
    print(f"已生成并覆盖 CSV 文件：{csv_path}")


dataset_path = Path("/dataset/malware")
for md5_folder in dataset_path.iterdir():
    if not md5_folder.is_dir():
        continue
    print(f"正在处理文件夹: {md5_folder.name}")

    base_path = md5_folder
    stem = md5_folder.name
    strace_file = base_path / f"{stem}.strace"
    pcap_file = base_path / f"{stem}.pcap"
    brust_file = base_path / f"{stem}.brust"
    csv_file = base_path / f"{stem}.csv"
    brust_json_file = base_path / f"{stem}_brust.json"
    graph_file = base_path / f"{stem}.gexf"

    # One TrafficUtils per folder, pointed at that folder's .brust file.
    trafficutils = TrafficUtils(str(base_path), f"{stem}.brust")

    # Step 1: strace log -> CSV of (PID, Function, Params, Content, Result).
    if strace_file.exists():
        _write_strace_csv(strace_file, csv_file)

    # Step 2: split the pcap and extract burst features from each piece.
    if pcap_file.exists():
        _extract_bursts(trafficutils, base_path, pcap_file, f"{stem}.pcap")

    # Step 3: .brust file -> payload/burst JSON.
    if brust_file.exists():
        _burst_to_json(trafficutils, brust_file, brust_json_file)

    # Step 4: match CSV payload contents against the burst JSON.
    if brust_json_file.exists() and csv_file.exists():
        _annotate_csv_with_bursts(brust_json_file, csv_file)

    # Step 5: drop the graph file; it is not needed downstream.
    if graph_file.exists():
        graph_file.unlink()