from sys import flags
from scapy.all import *
from scapy.utils import PcapReader
import numpy as np
import os
import re
from collections import Counter
from os import path
from txtop import TxtOperate


def scaner_file(url):
    """Return the names of all entries in the directory *url*.

    Thin wrapper around :func:`os.listdir` kept for readability at call
    sites that "scan" a folder for candidate pcap files.
    """
    return os.listdir(url)

def choosefile(filename):
    """Return ``True`` if *filename* matches the ``ips...`` naming pattern.

    The pattern ``ips\\w.+`` anchors at the start of the name (``re.match``),
    so a matching name begins with ``ips`` followed by one word character
    and at least one more character.

    Fixes: ``!= None`` replaced by the identity test (PEP 8), and the
    redundant ``if/else`` collapsed into a direct boolean return.
    """
    return re.match(r'ips\w.+', filename) is not None

# Find the client IP: the source IP of the first TCP packet is treated as the
# initiator of the upstream flow (the client), because a TCP stream is always
# initiated by the client.
def find_port(packets):
    """Locate the client/server addresses of the first TCP packet.

    Args:
        packets: an iterable of scapy-like packets supporting
            ``haslayer("TCP")`` and exposing ``payload.src`` / ``payload.dst``
            (i.e. the IP layer — assumed present under TCP; TODO confirm
            for non-IP encapsulations).

    Returns:
        ``(upsrc, downsrc)`` — source and destination address of the first
        TCP packet — or ``None`` when the capture contains no TCP packet.

    Fixes: the original ``while True`` index walk looped past the end of
    ``packets`` (IndexError) when no TCP packet was present, and the found
    addresses were only printed, never returned.
    """
    for pkt in packets:
        if pkt.haslayer("TCP"):
            upsrc = pkt.payload.src
            downsrc = pkt.payload.dst
            print(upsrc, '   ', downsrc)
            return upsrc, downsrc
    return None

# Per-flow Shannon entropy of packet lengths.
def calc_ent(x):
    """Calculate the Shannon entropy of the values in *x*.

    Args:
        x: an iterable of hashable values (here: packet lengths).

    Returns:
        The entropy in bits (base-2), ``0.0`` for empty input.

    Fixes: the original crashed with ZeroDivisionError on an empty flow
    and was O(n^2) because ``x.count`` rescanned the list for every
    distinct value; ``collections.Counter`` tallies in one O(n) pass.
    """
    total = len(x)
    if total == 0:
        # An empty flow carries no information; also avoids division by zero.
        return 0.0
    ent = 0.0
    for count in Counter(x).values():
        p = count / total
        ent -= p * np.log2(p)
    return ent


if __name__ == "__main__":
    total_files = 0    # pcap files that passed the name filter
    low_entropy = 0    # of those, how many fell below the entropy threshold
    dir_name = os.path.dirname(__file__) + '/'
    # File names that survived the first filtering step, one per line.
    # NOTE: renamed from `path` — the original shadowed `os.path` and the
    # module-level `from os import path` import.
    retained_path = dir_name + 'first_step_retained.txt'
    txtort = TxtOperate()
    l = txtort.Readtxt(retained_path)
    for filename in l:          # iterate over retained file names (ips***)
        len_lst = []
        if choosefile(filename):   # keep only names matching the regex
            total_files += 1
            packets = rdpcap(dir_name + filename)
            # Collect the length of every TCP packet in this capture.
            for data in packets:
                if data.haslayer("TCP"):
                    len_lst.append(len(data))
            # Compute the entropy once (the original called calc_ent twice).
            ent = calc_ent(len_lst)
            print(ent)
            if ent < 3.3:   # empirical threshold for "low entropy" flows
                low_entropy += 1
        print(len_lst)
    print('Done!')