# -*- coding: utf-8 -*-
# scapy's wildcard import comes first so later explicit imports win any name clashes
from scapy.all import *

import os
import re
import statistics

from calcEntropy import PcapFeatures
from txtop import TxtOperate

def read_pcap(file_name):
    """Load every packet from the pcap file at *file_name*.

    Returns the scapy packet list produced by ``rdpcap``.
    """
    print('开始读取pcap')
    packets = rdpcap(file_name)
    print('读取完成')
    return packets


def fenliu(file_data):
    """Collect the source ports of all TCP SYN packets in *file_data*.

    A SYN packet (TCP flags == 0x02) marks the start of a client connection,
    so its source port identifies one flow.  Ports are returned in packet
    order; a port that opens several connections appears multiple times.

    :param file_data: iterable of scapy packets (e.g. the result of rdpcap)
    :return: list of source ports, one per SYN packet seen
    """
    ports = []
    for packet in file_data:
        # Packets without a TCP layer raise IndexError from scapy's
        # __getitem__; skip those instead of swallowing every exception.
        try:
            tcp = packet['TCP']
        except (IndexError, AttributeError):
            continue
        if tcp.flags == 0x02:
            ports.append(tcp.sport)
    return ports


def fenbao(file_data):
    """Split *file_data* into per-flow packet lists keyed by SYN source port.

    Every packet whose TCP source or destination port matches one of the
    SYN ports returned by :func:`fenliu` is appended to that port's bucket.
    Matching prefers the source port, and a duplicated port always maps to
    its FIRST bucket (the same semantics ``list.index`` gave the original),
    so later buckets for a repeated port stay empty.

    :param file_data: iterable of scapy packets
    :return: list of packet lists, parallel to the port list from fenliu()
    """
    ports = fenliu(file_data)
    # Map each port to the index of its first occurrence: O(1) lookups
    # instead of `in` + `index` scans per packet.
    first_index = {}
    for i, p in enumerate(ports):
        first_index.setdefault(p, i)
    # NOTE: [[]] * n would alias one inner list n times; build fresh lists.
    data_list = [[] for _ in ports]
    for packet in file_data:
        try:
            tcp = packet['TCP']
        except (IndexError, AttributeError):
            # No TCP layer in this packet; it belongs to no flow.
            continue
        if tcp.sport in first_index:
            data_list[first_index[tcp.sport]].append(packet)
        elif tcp.dport in first_index:
            data_list[first_index[tcp.dport]].append(packet)
    return data_list


# List every entry in a directory.
def scaner_file(url):
    """Return the names of all entries in directory *url* (os.listdir order)."""
    return os.listdir(url)

# Check whether a filename follows the split-flow naming scheme.
def choosefile(filename):
    """Return True if *filename* starts with 'ips' followed by at least one
    word character (the names produced by save_file), else False.
    """
    # `is not None` instead of `!= None`; return the boolean directly
    # rather than an if/else returning True/False.
    return re.match(r'ips\w+', filename) is not None

# Persist each split flow as its own pcap file.
def save_file(dir, data_list):
    """Write every packet list in *data_list* to ``<dir>ips<i>.pcap``.

    *dir* is expected to already end with a path separator.

    :return: (list of written file paths, list of base names without
             extension, number of flows written)
    """
    xiaobao = []    # full paths of the written pcap files
    file_name = []  # corresponding base names ('ips<i>')
    total = len(data_list)
    print('开始保存pcap,共有' + str(total) + '个pcap包')
    for idx, flow in enumerate(data_list):
        base = 'ips' + str(idx)
        path = dir + base + '.pcap'
        wrpcap(path, flow)
        xiaobao.append(path)
        file_name.append(base)
    print('保存完成')
    return xiaobao, file_name, total


if __name__ == "__main__":
    dir_name = os.path.dirname(__file__) + '/'
    filtered_count = 0  # number of flows rejected by the entropy filter (was `sum`, which shadowed the builtin)
    flow_count = 0      # number of flow files examined
    filtered = []       # filenames of rejected flows
    retained = []       # filenames of kept flows
    url = os.path.abspath(os.path.dirname(__file__))
    for filename in scaner_file(url):
        if not choosefile(filename):
            continue
        flow_count += 1
        pcap_feat = PcapFeatures(url + '/' + filename)
        b = pcap_feat.getHttp_S_ReqEntropy()
        cutoff = 10  # keep only the first N entropy values
        if len(b) > 0:
            average = statistics.mean(b[:cutoff])
            # Entropy bounds of normal HTTPS request payloads; flows
            # outside the band are filtered out as suspicious.
            if average < 5.24916179173597 or average > 6.212293999527468:
                filtered_count += 1
                filtered.append(filename)
            else:
                retained.append(filename)
        else:
            # No entropy samples available: keep the flow by default.
            retained.append(filename)

    # dir_name already ends with '/', so don't add another separator.
    path = dir_name + 'first_step_retained.txt'
    txtort = TxtOperate()
    txtort.Writetxt(path, str(retained))

    print(filtered_count)
    # Guard against ZeroDivisionError when no matching flow files exist.
    if flow_count:
        print('filter proportion:', str(float(filtered_count) / flow_count * 100), '%')
    else:
        print('filter proportion:', '0.0', '%')
    
