import pandas as pd
import os
import json
from collections import defaultdict
import re
import networkx as nx
import matplotlib.pyplot as plt
from pathlib import Path

def generate_pid_csv(data, output_dir):
    """
    Split *data* into one CSV per process and write them into *output_dir*.

    Each distinct value of the ``PID`` column yields a file named
    ``PID_<pid>.csv`` containing only that process's rows, in their
    original order.
    """
    # groupby(sort=False) preserves first-appearance order, matching the
    # order of data['PID'].unique().
    for pid, pid_rows in data.groupby('PID', sort=False):
        pid_rows.to_csv(f'{output_dir}/PID_{pid}.csv', index=False)

    print(f"All PID CSV files generated in '{output_dir}'.")

def generate_process_relation(data, output_dir):
    """
    Extract parent->child PID relations from process-creation syscalls and
    save them to ``<output_dir>/process_tree.json``.

    Parameters
    ----------
    data : pandas.DataFrame
        Trace with at least ``PID``, ``Function`` and ``Result`` columns.
        The child PID is taken from the first whitespace-separated token
        of ``Result``.
    output_dir : str or os.PathLike
        Directory the JSON file is written into (must exist).
    """
    process_creation_calls = {'clone', 'fork', 'vfork', 'posix_spawn'}
    process_tree = defaultdict(list)

    # Build the process tree based on the function calls
    for _, row in data.iterrows():
        if row['Function'] in process_creation_calls and pd.notna(row['Result']):
            # Coerce to str: pandas may have parsed Result as a number, and
            # numbers have no .split(). Skip rows whose first token is not
            # an integer PID (e.g. an error string).
            try:
                child_pid = int(str(row['Result']).split()[0])
            except (ValueError, IndexError):
                continue
            parent_pid = row['PID']
            if not isinstance(parent_pid, str):
                # json.dump rejects numpy integer dict keys; normalize to
                # a plain int so serialization cannot fail on numeric CSVs.
                parent_pid = int(parent_pid)
            process_tree[parent_pid].append(child_pid)

    # Save the process tree as JSON
    output_file = f'{output_dir}/process_tree.json'
    with open(output_file, 'w') as f:
        json.dump(process_tree, f, indent=4)

    print(f"Process tree saved as '{output_file}'.")

def find_ip(syscall: str, params: str) -> tuple[list, bool]:
    """
    Extract ``(port, ip)`` string pairs from a syscall's parameter string.

    For ``connect``/``bind`` the address is parsed from the sockaddr
    arguments (``inet_addr("a.b.c.d")`` for IPv4, ``inet_pton(AF_INET6,
    "...")`` for IPv6), with the port taken from ``htons(<port>)``.  For
    any other syscall, endpoints are parsed from fd annotations of the
    form ``[src:port]`` or ``[src:port->dst:port]`` where IPv6 addresses
    are wrapped in brackets.

    Returns
    -------
    tuple[list, bool]
        The list of ``(port, ip)`` tuples and a flag that is True when at
        least one endpoint was found.
    """
    results = []
    if syscall == 'connect' or syscall == 'bind':
        # Matches TCP/TCPv6/UDP/UDPv6 sockaddr forms.
        connect_ipv4_pattern = re.compile(r'htons\((\d+)\).*?inet_addr\("(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"\)')
        connect_ipv6_pattern = re.compile(r'htons\((\d+)\).*?inet_pton\(AF_INET6,\s*"([\w:\.]+)"')

        match_ipv4 = connect_ipv4_pattern.search(params)
        if match_ipv4:
            results.append((match_ipv4.group(1), match_ipv4.group(2)))

        match_ipv6 = connect_ipv6_pattern.search(params)
        if match_ipv6:
            results.append((match_ipv6.group(1), match_ipv6.group(2)))

    else:
        # Raw string (the original was a plain string whose backslash
        # escapes are invalid and deprecated in modern Python).
        # Groups: 1/2 src ip (bracketed IPv6 / IPv4), 3 src port,
        #         4/5 dst ip, 6 dst port (the -> half is optional).
        compl_pattern = re.compile(r'\[(?:(?:(?:\[([0-9a-fA-F:.]+)\])|([0-9.]+)):(\d+))(?:->(?:(?:\[([0-9a-fA-F:.]+)\])|([0-9.]+)):(\d+))?\]')
        for match in compl_pattern.finditer(params):
            source_ip = match.group(1) or match.group(2)
            source_port = match.group(3)
            destination_ip = match.group(4) or match.group(5)
            destination_port = match.group(6)
            results.append((source_port, source_ip))
            if destination_ip and destination_port:
                results.append((destination_port, destination_ip))
    return (results, True) if results else ([], False)

def build_fd_graph(inp, output_gexf_path):
    """
    Build a directed graph of file-descriptor activity from a syscall-trace
    CSV and save it as GEXF.

    The CSV (read with pandas) must provide the columns ``Function``,
    ``Params`` and ``Result``; an optional ``Matched_Brust`` column
    attaches traffic-burst nodes to send/recv operations.  Each fd
    annotation (e.g. ``3<TCP:[1.2.3.4:80->5.6.7.8:443]>``) becomes a node,
    and each later syscall that uses the fd appends a ``<fd>-<function>``
    node chained to the fd's latest node, so a path through the graph
    follows one descriptor's lifetime.  Nothing is written if the graph
    ends up empty.

    Parameters
    ----------
    inp : str or os.PathLike
        Path of the input CSV file.
    output_gexf_path : str or os.PathLike
        Destination path for the GEXF file.
    """
    df = pd.read_csv(inp)
    # Matches every fd annotation: the first alternative captures
    # TCP/UDP/TCPv6/UDPv6 sockets with their endpoint info, the generic
    # `\d+<.*?>` fallback captures any other fd (unix sockets, files, ...).
    fd_pattern = re.compile(r'\d+<(?:TCP|UDP|TCPv6|UDPv6):\[(?:\[(?:[0-9a-fA-F:\.]+)\]:(?:\d+)(?:->)?(?:\[(?:[0-9a-fA-F:\.]+)\]:(?:\d+))?|(?:[0-9\.]+):(?:\d+)(?:->(?:[0-9\.]+):(?:\d+))?)\]>'\
                                 r'|'\
                                 r'\d+<.*?>'
                                 )
    G = nx.DiGraph()
    # fd string -> the node currently representing that fd in the graph
    fd_dict = {}
    # fd -> list of (port, ip) endpoints recorded by connect/bind
    connect_info = {} 
    # already-connected fd -> the original fd it maps back to
    connect_fds = {}
    traffic = 1

    for i, row in df.iterrows():
        # Skip failed calls (result contains -1) except connect, which is
        # kept — presumably because a non-blocking connect can return -1
        # (EINPROGRESS) yet still carry address info.  TODO confirm.
        if '-1' in str(row['Result']) and str(row['Function']) != 'connect' :
            continue
        result_fd = fd_pattern.findall(str(row['Result']))
        param_fds = fd_pattern.findall(str(row['Params']))
        ip_info, valid = find_ip(str(row['Function']),str(row['Params']))

        if result_fd:
            # Every fd in the result is registered in fd_dict and added to
            # the graph straight away (accumulating params on revisits).
            for fd in result_fd:
                """  
                    结果有fd，则直接添加进fd_dict，并且直接添加到图里。
                """
                full_fd = fd
                params_str = str(row['Params'])
                if full_fd not in G:
                    G.add_node(full_fd, params=params_str)
                else:
                    G.nodes[full_fd]['params'] += f" {params_str}"
                fd_dict[full_fd] = full_fd
        
        # For connect/bind record an fd -> endpoint mapping; for anything
        # else, use the endpoints to map this row's fd to a connected fd.
        if valid:
            # connect/bind: remember the endpoints seen on this fd.
            if str(row['Function']) == 'connect' or str(row['Function']) == 'bind':
                if param_fds:
                    ip_fd = param_fds[0]
                    connect_info[ip_fd] = ip_info
            # Not connect/bind: match endpoints against recorded ones to
            # associate this fd with the originally-connected one.
            else:
                for key, value_list in connect_info.items():
                    skip_key = False  # flag: abandon this key early
                    for ip in ip_info:
                        for value in value_list:
                            if ip == value:
                                try:
                                    for param_fd in param_fds:
                                        if 'TCP' in param_fd or 'UDP' in param_fd:
                                            if param_fd.split('<')[0] == key.split('<')[0]:
                                                # If the key itself still carries
                                                # parseable endpoints, skip it.
                                                _, isip = find_ip(str(row['Function']), str(key))
                                                if isip:
                                                    skip_key = True  # set the flag
                                                    break  # break out of the param_fd loop
                                                connect_fds[param_fd] = key
                                # NOTE(review): bare except silently swallows any
                                # error from the matching above — confirm intended.
                                except:
                                    pass
                        if skip_key: 
                            break  
                    if skip_key:  # skipped: move on to the next key
                        continue

        
        first_match = None
        for fd in param_fds:
            # Unknown fd: register it as a fresh node.
            if fd not in fd_dict and fd not in connect_fds:
                G.add_node(fd, params=str(row['Params']))
                fd_dict[fd] = fd
                first_match = fd
                continue
            # First matched fd already known: chain a <fd>-<function> op node.
            if first_match is None:
                try:
                    current_node = fd_dict[fd]
                except:
                    # fd only known via connect_fds: follow the mapping.
                    fd = connect_fds[fd]
                    current_node = fd_dict[fd]
                first_match = fd   # update first_match only here, otherwise it breaks
                new_node = f"{fd}-{row['Function']}"
                params_str = str(row['Params'])
                if new_node not in G:
                    G.add_node(new_node, params=params_str)
                else:
                    G.nodes[new_node]['params'] += f" {params_str}"
                G.add_edge(current_node, new_node)
                fd_dict[fd] = new_node
            # Subsequent fds: just link them to the first match's node.
            else:
                try:
                    current_node = fd_dict[fd]
                except KeyError:
                    fd = connect_fds[fd]
                    current_node = fd_dict[fd]
                G.add_edge(current_node, fd_dict[first_match])
        # If both result fds and param fds exist, connect them with edges.
        if result_fd and param_fds:
            for fd in param_fds:
                if fd not in fd_dict and fd not in connect_fds:
                    continue
                try:
                    current_node = fd_dict[fd]
                except:
                    fd = connect_fds[fd]
                    current_node = fd_dict[fd]
                for result in result_fd:
                    if result in fd_dict or result in connect_fds:
                        try:
                            resultfd = connect_fds[result]
                            print(resultfd)
                        except: 
                            resultfd = result
                        G.add_edge(current_node, resultfd)
        
        # When Matched_Brust is present there is only one fd, never several.
        # Check whether the Matched_Brust column exists at all.
        # problem (author's note): sn was originally not checked for graph
        # membership directly — the `if sn not in G` below was added later.
        if 'Matched_Brust' in df.columns:
            if pd.notna(row['Matched_Brust']):
                if str(row['Function']) in ['sendmsg', 'sendto']:
                    # Map through connect_fds when the fd was re-mapped.
                    try:
                        sn = f"{connect_fds[param_fds[0]]}-{row['Function']}"
                    except:
                        sn = f"{param_fds[0]}-{row['Function']}"
                    
                    if sn not in G:
                        G.add_node(sn, params=str(row['Params']))
                    tn = f'traffic{str(traffic)}'
                    traffic = traffic + 1
                    G.add_node(tn,burst = str(row['Matched_Brust']))
                    # Outgoing traffic: edge from the send node to the burst.
                    G.add_edge(sn,tn)
                if str(row['Function']) in ['recvfrom', 'recvmsg']:    
                    # Map through connect_fds when the fd was re-mapped.
                    try:
                        sn = f"{connect_fds[param_fds[0]]}-{row['Function']}"
                    except:
                        sn = f"{param_fds[0]}-{row['Function']}"
                    if sn not in G:
                        G.add_node(sn, params=str(row['Params']))
                    tn = f'traffic{str(traffic)}'
                    traffic = traffic + 1
                    G.add_node(tn,burst = str(row['Matched_Brust']))
                    # Incoming traffic: edge from the burst into the recv node.
                    G.add_edge(tn,sn)
                    
    # Only write a file when the graph is non-empty.
    if G.number_of_nodes() > 0:
        nx.write_gexf(G, output_gexf_path)

if __name__ == "__main__":
    # Pipeline (steps 1 and 2 are currently disabled):
    #   1. split the trace CSV per PID
    #   2. generate the process relation JSON
    #   3. build the fd graph from the per-sample CSV
    dataset_path = Path("/dataset/goodware")
    for md5_folder in dataset_path.iterdir():
        if not md5_folder.is_dir():
            continue
        print(f"正在处理文件夹: {md5_folder.name}")
        csv_file = md5_folder / f"{md5_folder.name}.csv"
        if csv_file.exists():
            output_path = md5_folder / f"{md5_folder.name}.gexf"
            build_fd_graph(csv_file, output_path)
            # The source CSV is consumed once its graph has been written.
            os.remove(csv_file)