import hmac
import logging
import struct
from collections import defaultdict
from random import random, seed
from time import time_ns

from scapy.compat import raw
from scapy.layers.inet import IP, TCP, UDP
from policy import *
import sys

sys.path.append('/home/nfv/vSFC-p')
from NFs.network_address_translation import NAT
from NFs.merger import Merger
from NFs.intrusion_detection_system import IDS
from NFs.firewall import Firewall


# Flow-correctness verification protocol
class FlowCorrectness(object):
    """Flow-correctness verification protocol for a service function chain.

    Every protected packet carries a 34-byte trailer appended to the IP
    payload::

        pkt_id (4B) | src_nf (4B) | dst_nf (4B) | flow_id (2B) |
        seq_num (4B) | HMAC-MD5 tag (16B)

    The tag is HMAC-MD5 over ``packet || 18-byte header || marker`` where the
    one-byte marker is b'1' for sampled (logged) packets and b'0' otherwise.
    One instance wraps one network function; the wrapped NF is ``None`` at
    the entry/exit gateways.
    """

    def __init__(self, network_function, network_function_identifier: int, name: str):
        self.network_function = network_function  # wrapped NF (NAT/IDS/FW/Merger); None at the gateways
        self.network_function_identifier = network_function_identifier  # this NF's numeric identifier
        self.name = name  # log file name

        # Trailer fields parsed from the incoming packet (filled by un_pack).
        self.in_dst_nf = None
        self.in_src_nf = None
        self.in_pkt_identifier = None
        self.in_auth = None
        self.in_flow_identifier = None
        self.in_seq_num = None

        # Trailer fields for the outgoing packet (serialized by pack).
        self.out_dst_nf = None
        self.out_src_nf = None
        self.out_pkt_identifier = None
        self.out_auth = None
        self.out_flow_identifier = None
        self.out_seq_num = None

        self.key = b'flow'      # shared HMAC key
        self.pkt_id_count = 0   # last issued 32-bit packet identifier
        self.flow_id_count = 0  # last issued 16-bit flow identifier
        # Sampling state: was the last verified packet a sampled one, and
        # the per-flow adaptive sampling probability.
        self.is_sample_packet = False
        self.per_flow_sample_para = defaultdict(float)

        # Per-(flow, peer NF) sequence numbers for send and receive sides.
        self.per_flow_nf_seq_num_send = defaultdict(int)
        self.per_flow_nf_seq_num_receive = defaultdict(int)

        # TODO: logging, may be async
        _format = '%(asctime)s - %(levelname)s - %(filename)s [line:%(lineno)d] : %(message)s'
        logging.basicConfig(format=_format, filename=self.name, level=logging.INFO)
        self.logger = logging.getLogger(self.name)

        # Same flow hash always maps to the same flow identifier.
        self.flow_identifier_map = defaultdict(int)
        self.original_ttl = None
        # NOTE(review): seeds the *global* random module so sampling is
        # reproducible across runs; affects every user of `random` in the
        # process.
        seed(1)

    def un_pack(self, pkt: bytes):
        """Parse the 34-byte trailer of ``pkt`` into the ``in_*`` fields."""
        self.in_pkt_identifier, self.in_src_nf, self.in_dst_nf, self.in_flow_identifier, self.in_seq_num = struct.unpack(
            '>IIIHI', pkt[-34:-16])
        self.in_auth = pkt[-16:]

    def pack(self):
        """Serialize the ``out_*`` fields into the 18-byte trailer header."""
        return struct.pack('>IIIHI', self.out_pkt_identifier, self.out_src_nf, self.out_dst_nf,
                           self.out_flow_identifier, self.out_seq_num)

    def decrease_len_chksum(self, pkt: bytes):
        """Shrink ip.len by the 34-byte trailer and recompute the checksum.

        Saves the packet's TTL and normalizes it to 64 so that per-hop TTL
        decrements do not invalidate the HMAC, which covers the IP header.
        """
        scapy_pkt = IP(pkt)
        del scapy_pkt[IP].chksum  # force scapy to recompute the checksum
        scapy_pkt[IP].len -= 34  # restore the original (untagged) length
        self.original_ttl = scapy_pkt[IP].ttl
        scapy_pkt[IP].ttl = 64
        return raw(IP(bytes(scapy_pkt)))

    def increase_len_chksum(self, pkt: bytes):
        """Grow ip.len by the 34-byte trailer and recompute the checksum."""
        scapy_pkt = IP(pkt)
        del scapy_pkt[IP].chksum
        scapy_pkt[IP].len += 34
        self.original_ttl = scapy_pkt[IP].ttl
        return raw(IP(bytes(scapy_pkt)))

    def retain_len_chksum(self, pkt: bytes):
        """Strip the trailer, restore the saved TTL and recompute the checksum."""
        scapy_pkt = IP(pkt[:-34])
        del scapy_pkt[IP].chksum
        scapy_pkt[IP].ttl = self.original_ttl
        return bytes(IP(bytes(scapy_pkt)))

    def verify_integrity(self, pkt: bytes):
        """Validate destination NF, sequence number and HMAC of ``pkt``.

        Side effects: fills the ``in_*`` fields and sets
        ``is_sample_packet`` when the b'1' (sampled) marker matches.
        Returns True on success, False otherwise.
        """
        self.un_pack(pkt)  # parse the trailer fields
        self.is_sample_packet = False
        if self.network_function_identifier != self.in_dst_nf:  # wrong destination NF
            self.logger.error(f"Invalid destination {self.in_dst_nf} for {self.network_function_identifier}.")
            return False
        if self.per_flow_nf_seq_num_receive[(self.in_flow_identifier, self.in_src_nf)] != self.in_seq_num:
            self.logger.error(
                f"Invalid packet sequence number {self.in_seq_num} for"
                f" {self.per_flow_nf_seq_num_receive[(self.in_flow_identifier, self.in_src_nf)]}")
            return False
        msg = pkt[:-16]  # pkt | pktID | srcNF | dstNF | flowID | seqNum
        # Try the non-sampled marker first, then the sampled one.
        auth = hmac.new(self.key, msg + b'0', digestmod='MD5').digest()
        if auth == self.in_auth:
            return True
        auth = hmac.new(self.key, msg + b'1', digestmod='MD5').digest()
        if auth == self.in_auth:
            self.is_sample_packet = True
            return True
        self.logger.error(f"Invalid packet {self.in_pkt_identifier, self.in_flow_identifier, self.in_seq_num}, "
                          f"may be injected.")
        return False

    def out_flow_counter(self, flow_identifier: int, dst_nf: int):
        """Return and post-increment the send sequence number for (flow, dst NF)."""
        key = (flow_identifier, dst_nf)
        cnt = self.per_flow_nf_seq_num_send[key]
        self.per_flow_nf_seq_num_send[key] = cnt + 1
        return cnt

    def in_flow_counter(self, flow_identifier: int, src_nf: int):
        """Return and post-increment the receive sequence number for (flow, src NF)."""
        key = (flow_identifier, src_nf)
        cnt = self.per_flow_nf_seq_num_receive[key]
        self.per_flow_nf_seq_num_receive[key] += 1
        return cnt

    def generate(self, pkt: bytes):
        """Entry gateway: tag a raw packet with a fresh trailer and HMAC."""
        flow_hash = self.compute_flow_hash(pkt)
        self.out_pkt_identifier = self.generate_pkt_identifier()
        self.out_src_nf = self.network_function_identifier
        self.out_dst_nf = self.policy(self.out_src_nf)[0]
        self.out_flow_identifier = self.generate_flow_identifier(flow_hash)
        self.out_seq_num = self.out_flow_counter(self.out_flow_identifier, self.out_dst_nf)
        msg = pkt + self.pack()
        if self.sample(flow_hash):
            self.out_auth = hmac.new(self.key, msg + b'1', digestmod='MD5').digest()
            self.logger.info((b'null', msg + self.out_auth, self.network_function_identifier, time_ns()))
        else:
            self.out_auth = hmac.new(self.key, msg + b'0', digestmod='MD5').digest()
        return self.increase_len_chksum(msg + self.out_auth)

    def process(self, pkt: bytes):
        """Verify ``pkt``, run it through the wrapped NF and re-tag the result.

        Returns None when verification fails or the NF drops the packet.
        """
        pkt = self.decrease_len_chksum(pkt)
        # Fix: the verification result used to be discarded (``if True:``),
        # so tampered or mis-routed packets were processed anyway.  Drop
        # them instead, matching merge() and exit_gateway().
        if not self.verify_integrity(pkt):
            return
        msg = pkt[:-34]  # original packet without the trailer
        out_msg = self.network_function.match(msg)
        if out_msg is False:  # NF dropped the packet
            return
        elif out_msg is True:  # NF passed the packet through unchanged
            out_msg = msg
        self.out_pkt_identifier = self.in_pkt_identifier
        self.out_src_nf = self.network_function_identifier
        self.out_dst_nf = self.policy(self.out_src_nf)[0]  # single next hop
        self.out_flow_identifier = self.in_flow_identifier
        self.in_flow_counter(self.in_flow_identifier, self.in_src_nf)
        self.out_seq_num = self.out_flow_counter(self.out_flow_identifier, self.out_dst_nf)
        msg = out_msg + self.pack()
        if self.is_sample_packet:
            self.out_auth = hmac.new(self.key, msg + b'1', digestmod='MD5').digest()
            self.logger.info((pkt, msg + self.out_auth, self.network_function_identifier, time_ns()))
        else:
            self.out_auth = hmac.new(self.key, msg + b'0', digestmod='MD5').digest()
        return self.increase_len_chksum(msg + self.out_auth)

    def merge(self, pkt: bytes):
        """Feed ``pkt`` to the wrapped merger NF; when the merge completes,
        re-tag the merged packet once per next-hop NF.

        Returns False when the merger drops the packet, None when
        verification fails, otherwise the list of tagged output packets.
        """
        pkt = self.decrease_len_chksum(pkt)
        if self.verify_integrity(pkt):
            out_msg, data = self.network_function.match(pkt, self.in_pkt_identifier, self.in_src_nf)
            if out_msg is False:
                return False
            else:
                out_msg = out_msg[:-34]  # strip the trailer from the merged packet
            # A merge result was returned.
            self.out_pkt_identifier = self.in_pkt_identifier
            self.out_src_nf = self.network_function_identifier
            self.out_flow_identifier = self.in_flow_identifier
            dst_nfs = self.policy(self.out_src_nf, self.out_pkt_identifier)
            inputs, outputs = [], []
            for packet, src_nf in data:
                inputs.append(self.increase_len_chksum(packet))
                self.in_flow_counter(self.out_flow_identifier, src_nf)
            for dst_nf in dst_nfs:
                self.out_dst_nf = dst_nf
                self.out_seq_num = self.out_flow_counter(self.out_flow_identifier, dst_nf)
                msg = out_msg + self.pack()
                if self.is_sample_packet:
                    self.out_auth = hmac.new(self.key, msg + b'1', digestmod='MD5').digest()
                    out_pkt = msg + self.out_auth
                    outputs.append(self.increase_len_chksum(out_pkt))
                    # Log every merged input against the sampled output.
                    for packet in inputs:
                        self.logger.info((packet, out_pkt, self.network_function_identifier, time_ns()))
                else:
                    self.out_auth = hmac.new(self.key, msg + b'0', digestmod='MD5').digest()
                    outputs.append(self.increase_len_chksum(msg + self.out_auth))
            return outputs

    def exit_gateway(self, pkt: bytes):
        """Exit gateway: verify ``pkt`` and return it with the trailer removed.

        Returns None when verification fails.
        """
        pkt = self.decrease_len_chksum(pkt)
        if self.verify_integrity(pkt):
            self.in_flow_counter(self.in_flow_identifier, self.in_src_nf)
            if self.is_sample_packet:
                self.logger.info((pkt, b'null', self.network_function_identifier, time_ns()))
            return self.retain_len_chksum(pkt)

    def generate_pkt_identifier(self):
        """Return the next 32-bit packet identifier (wraps modulo 2**32).

        Fix: the previous version reset the counter to 0 *before* returning,
        so the value 4294967295 was never issued and 0 was returned at the
        wrap point instead.
        """
        self.pkt_id_count = (self.pkt_id_count + 1) % 4294967296
        return self.pkt_id_count

    @staticmethod
    def policy(src_nf: int, pkt_identifier: int = 0, flow_identifier: int = 0):
        """Return the list of next-hop NF identifiers for ``src_nf``.

        pkt/flow identifiers are ignored: all packets take the same path in
        the experiment.
        """
        return policies[src_nf]['out']

    @staticmethod
    def compute_flow_hash(pkt: bytes) -> int:
        """Hash the 5-tuple (src, dst, proto, sport, dport) of ``pkt``.

        Non-TCP packets are assumed to be UDP.
        """
        scapy_pkt = IP(pkt)
        src, dst = scapy_pkt[IP].src, scapy_pkt[IP].dst
        proto = scapy_pkt[IP].proto
        if proto == 6:  # TCP
            src_port, dst_port = scapy_pkt[TCP].sport, scapy_pkt[TCP].dport
        else:
            src_port, dst_port = scapy_pkt[UDP].sport, scapy_pkt[UDP].dport
        # Fix: hash a tuple instead of a string concatenation — concatenation
        # is ambiguous ("1.2"+"3.4" == "1.23"+".4") and could collide
        # distinct flows.  The hash is only used as an internal dict key.
        return hash((src, dst, proto, src_port, dst_port))

    def generate_flow_identifier(self, flow_hash: int) -> int:
        """Return the stable 16-bit flow identifier for ``flow_hash``,
        allocating a new one for unseen flows (counter wraps at 2**16-1,
        so very old identifiers may eventually be reused)."""
        if flow_hash not in self.flow_identifier_map:
            self.flow_id_count += 1
            self.flow_identifier_map[flow_hash] = self.flow_id_count
            if self.flow_id_count == 65535:  # 2^16-1
                self.flow_id_count = 0
        return self.flow_identifier_map[flow_hash]

    def sample(self, flow_hash, alpha=0.1, min_val=0.1):
        """Adaptive per-flow sampling decision.

        Each flow starts with probability 1.0; every sampled packet decays
        the probability by a factor of (1 + alpha), floored at ``min_val``.
        Returns True when this packet should be sampled (logged).
        """
        if flow_hash not in self.per_flow_sample_para:
            self.per_flow_sample_para[flow_hash] = 1.0
        para = self.per_flow_sample_para[flow_hash]
        sample_ratio = min_val if para < min_val else para
        if random() < sample_ratio:
            if sample_ratio <= para:  # not clamped by the floor → keep decaying
                self.per_flow_sample_para[flow_hash] /= (1 + alpha)
            return True
        return False


if __name__ == '__main__':
    # Two hand-crafted TCP/IP test packets (payloads differ in length).
    _pkt = b'E\x00\x00<\x00\x01\x00\x00@\x06\x11\xe5\xf7\x89\xe9\xcc$\x91b\xef\x8duM\xf1\x00\x00\x00\x00\x00\x00\x00' \
           b'\x00P\x02 \x00!\xca\x00\x00abcdefffffffffffffff'
    _pkt2 = b'E\x00\x00-\x00\x01\x00\x00@\x06\x11\xf4\xf7\x89\xe9\xcc$\x91b\xef\x8duM\xf1\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00P\x02 \x00!\xd9\x00\x00abcdefghl'
    for _pkt in [_pkt]:
        # Benchmark the entry gateway.  NOTE(review): each loop `break`s
        # after one iteration — leftover benchmark scaffolding kept as-is.
        flow_correctness = FlowCorrectness(None, 1, 'GatewayIn-F')
        start = time_ns()
        o_pkt = None
        for i in range(100000):
            o_pkt = flow_correctness.generate(_pkt)
            break
        print((time_ns() - start) / 10 ** 9, "GatewayIn-F")

        print(o_pkt)
        # Benchmark NAT.
        nat = NAT("172.17.0.2")
        flow_correctness_nat = FlowCorrectness(nat, 10, 'NAT-F')
        start = time_ns()
        for i in range(100000):
            o_pkt = flow_correctness_nat.process(o_pkt)
            break
        print((time_ns() - start) / 10 ** 9, "NAT-F")
        print(o_pkt)

        # Benchmark the 1-to-2 merger (splits into two outputs).
        merger = Merger(1, 2)
        flow_correctness_merger = FlowCorrectness(merger, 11, 'Merger-1-2-F')
        outs = None
        start = time_ns()
        for i in range(100000):
            outs = flow_correctness_merger.merge(o_pkt)
            break
        print((time_ns() - start) / 10 ** 9, "Merger-1-2-F")
        print(outs[0])
        print(outs[1])

        # Benchmark the IDS on the first merger output.
        ids = IDS('../NFs/ids_rules.csv')
        flow_correctness_ids = FlowCorrectness(ids, 110, 'IDS-F')
        start = time_ns()
        ids_o, fw_o = None, None
        for i in range(100000):
            ids_o = flow_correctness_ids.process(outs[0])
            break
        print((time_ns() - start) / 10 ** 9, "IDS-F")
        print(ids_o)

        # Benchmark the firewall on the second merger output.
        fw = Firewall()
        # Fix: this wrapper previously wrapped `ids` again instead of the
        # freshly created `fw` instance (copy-paste bug), so the firewall
        # was never exercised.
        flow_correctness_fw = FlowCorrectness(fw, 1110, 'FW-F')
        start = time_ns()
        for i in range(100000):
            fw_o = flow_correctness_fw.process(outs[1])
            break
        print((time_ns() - start) / 10 ** 9, "FW-F")

        # Benchmark the 2-to-1 merger (joins IDS and FW outputs).
        merger = Merger(2, 1)
        flow_correctness_merger = FlowCorrectness(merger, 11110, 'Merger-2-1-F')
        outs = None
        start = time_ns()
        for i in range(100000):
            flow_correctness_merger.merge(ids_o)
            outs = flow_correctness_merger.merge(fw_o)
            break
        print((time_ns() - start) / 10 ** 9, "Merger-2-1-F")
        print(outs[0])

        # Benchmark the exit gateway.
        flow_correctness = FlowCorrectness(None, 0, 'GatewayOut-F')
        start = time_ns()
        for i in range(100000):
            flow_correctness.exit_gateway(outs[0])
            break
        print((time_ns() - start) / 10 ** 9, "GatewayOut-F")