# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
flows.py

This data library represents network flows

It stores cumulative information (not individual packets)
about flows in a MongoDB collection
"""
# For Python 2.x compatibility:
from __future__ import division

# For CSV operations:
import csv
# For flows dictionary:
from collections import OrderedDict

# For math operations:
import numpy as np
from scapy.all import *

# For URL/TLS record extraction:
from app.analyze.analyzer import process_packet, utc_stamp_to_utc
# For logging configuration:
from app.utils.baseclass import BaseClass

import flow
from .packet import Packet


class Flows(BaseClass):
    """
    The Flows class represents cumulative information about flows
    (not individual packets)
    """

    def __init__(self, config, mode, searcher):
        """
        Initialise the Flows Class
        Args:
           config: Config class object
           mode: the mode in which the packets should be organised
             into flow records. 'u' is for unidirectional, 'b' is for
             bidirectional.
           searcher: lookup helper, passed through to flow.Flow and
             to process_packet() for URL/TLS analysis.
        """
        # Required for BaseClass:
        self.config = config
        # Set up Logging with inherited base class method:
        self.configure_logging(__name__, "flows_logging_level_s",
                               "flows_logging_level_c")
        # Mode is u for unidirectional or b for bidirectional:
        self.mode = mode
        # Python dictionaries to hold current and archived flow records:
        self.flow_cache = OrderedDict()
        self.flow_archive = OrderedDict()
        # URL/TLS records, keyed by a running counter (stringified):
        self.url_dict = OrderedDict()
        self.searcher = searcher
        # Create a Flow object for flow operations:
        self.flow = flow.Flow(config, self.logger, self.flow_cache,
                              self.flow_archive, mode, searcher)

        # Counter for packets that we ignored for various reasons:
        self.packets_ignored = 0

        # Counter for all the processed packets:
        self.packets_processed = 0

        # Counter for all the host/URL records found:
        self.host_recorded = 0

    def ingest_pcap(self, dpkt_reader):
        """
        DEPRECATED no-op: formerly ingested packet data from a dpkt
        pcap reader into flows. Use ingest_packet() instead.
        Args:
           dpkt_reader: dpkt pcap reader object (dpkt.pcap.Reader)
        """
        # NOTE(review): the original code decorated this method with
        # @DeprecationWarning, which replaced the function object with a
        # non-callable warning instance so any call raised TypeError.
        # Emit a proper runtime DeprecationWarning instead and keep the
        # method a callable no-op.
        import warnings
        warnings.warn(
            "Flows.ingest_pcap() is deprecated and does nothing; "
            "use ingest_packet() instead.",
            DeprecationWarning, stacklevel=2)

    def ingest_packet(self, packet):
        """
        ingest a packet (scapy object, from live capture or pcap
        replay) into flows.
        Args:
           packet: scapy packet; its capture timestamp is read from
             packet.time.
        """
        # Get timestamp from the scapy packet:
        timestamp = packet.time
        # Record any URL/TLS information carried by this packet first:
        self.get_packet_url(packet)
        # Instantiate an instance of Packet class with packet info:
        packet = Packet(self.logger, timestamp, packet, self.mode, "scapy")

        infoFrequency = self.config.get_value("infoFrequency")

        if packet.ingested:
            # Update the flow with packet info:
            self.flow.update(packet)
            self.packets_processed += 1
            # Periodic progress logging:
            if self.packets_processed % infoFrequency == 0:
                self.logger.info("Already processed %d packets",
                                 self.packets_processed)
        else:
            self.packets_ignored += 1

    def get_packet_url(self, packet):
        """
        Extract URL and TLS records from a packet via process_packet()
        and append any hits to self.url_dict, keyed by the running
        host_recorded counter.
        Args:
           packet: scapy packet passed through to process_packet.
        """
        url_dict, tls_dict = process_packet(packet, self.searcher)

        if url_dict:
            self.url_dict[str(self.host_recorded)] = url_dict
            self.host_recorded += 1
        if tls_dict:
            self.url_dict[str(self.host_recorded)] = tls_dict
            self.host_recorded += 1

    def write(self, file_name):
        """
        Write all flow records out to CSV file
        Args:
           file_name: path of the CSV file to (over)write.
        """
        with open(file_name, mode='w', newline='') as csv_file:
            if self.mode == 'u':
                # Unidirectional fields:
                fieldnames = ['src_ip', 'src_region', 'src_port', 'dst_ip', 'dst_region', 'dst_port',
                              'proto', 'pktTotalCount', 'octetTotalCount',
                              'min_ps', 'max_ps', 'avg_ps', 'std_dev_ps',
                              'flowStart', 'flowEnd', 'flowDuration',
                              'min_piat', 'max_piat', 'avg_piat', 'std_dev_piat',
                              '_octetTotalCount', '_min_ps', '_max_ps', '_avg_ps',
                              '_std_dev_ps', '_flowDuration', '_avg_piat', '_std_dev_piat']
            else:
                # Bidirectional fields:
                fieldnames = ['src_ip', 'src_port', 'dst_ip', 'dst_port',
                              'proto', 'pktTotalCount', 'octetTotalCount',
                              'min_ps', 'max_ps', 'avg_ps', 'std_dev_ps',
                              'flowStart', 'flowEnd', 'flowDuration',
                              'min_piat', 'max_piat', 'avg_piat', 'std_dev_piat',
                              'f_pktTotalCount', 'f_octetTotalCount',
                              'f_min_ps', 'f_max_ps', 'f_avg_ps', 'f_std_dev_ps',
                              'f_flowStart', 'f_flowEnd', 'f_flowDuration',
                              'f_min_piat', 'f_max_piat', 'f_avg_piat',
                              'f_std_dev_piat',
                              'b_pktTotalCount', 'b_octetTotalCount',
                              'b_min_ps', 'b_max_ps', 'b_avg_ps', 'b_std_dev_ps',
                              'b_flowStart', 'b_flowEnd', 'b_flowDuration',
                              'b_min_piat', 'b_max_piat', 'b_avg_piat',
                              'b_std_dev_piat'
                              ]
            writer = csv.DictWriter(
                csv_file, fieldnames=fieldnames, extrasaction='ignore')
            # Write header:
            writer.writeheader()
            # Write archived then current flows as rows. calculate()
            # mutates the dict it is given (e.g. converts flowStart /
            # flowEnd to UTC strings), so pass a shallow copy to avoid
            # corrupting the live records if write() is called again:
            for record in self.flow_archive.values():
                try:
                    writer.writerow(self.calculate(dict(record)))
                except Exception:
                    # Keep best-effort semantics of the original, but
                    # log instead of silently swallowing the error:
                    self.logger.exception("Failed to write flow record")
            for record in self.flow_cache.values():
                try:
                    writer.writerow(self.calculate(dict(record)))
                except Exception:
                    self.logger.exception("Failed to write flow record")

    def write_url(self, file_name):
        """
        Write all recorded URL/TLS records out to CSV file
        Args:
           file_name: path of the CSV file to (over)write.
        """
        with open(file_name, mode='w', newline='') as csv_file:
            fieldnames = ['type', 'filename', 'host', 'url', 'Source IP', 'Source Port',
                          'Destination IP', 'Destination Port', 'srcregion', 'destregion']
            writer = csv.DictWriter(
                csv_file, fieldnames=fieldnames, extrasaction='ignore')
            # Write header:
            writer.writeheader()
            # Write each recorded URL/TLS dict as a row:
            for record in self.url_dict.values():
                writer.writerow(record)

    def calculate(self, flow_dict):
        """
        Derive per-flow statistics (packet sizes, duration,
        inter-arrival times) in place on flow_dict and return it.
        Args:
           flow_dict: mutable mapping for one flow; must contain keys
             'length' (list of packet sizes), 'octetTotalCount',
             'pktTotalCount', 'flowStart', 'flowEnd' and 'iats'
             (list of inter-arrival times, possibly empty).
        Returns:
           The same flow_dict, augmented with min/max/avg/std-dev
           fields; flowStart/flowEnd are converted to UTC strings.
        """
        flow_dict['min_ps'] = min(flow_dict['length'])
        flow_dict['max_ps'] = max(flow_dict['length'])
        flow_dict['avg_ps'] = (flow_dict['octetTotalCount'] /
                               flow_dict['pktTotalCount'])
        flow_dict['std_dev_ps'] = np.std(flow_dict['length'])
        flow_dict['flowDuration'] = (
            flow_dict['flowEnd'] - flow_dict['flowStart'])
        # Inter-arrival stats only exist for flows with 2+ packets;
        # an empty 'iats' list also guards the (pktTotalCount - 1)
        # divisor below against zero:
        if flow_dict['iats']:
            flow_dict['min_piat'] = min(flow_dict['iats'])
            flow_dict['max_piat'] = max(flow_dict['iats'])
            flow_dict['avg_piat'] = (sum(flow_dict['iats']) /
                                     (flow_dict['pktTotalCount'] - 1))
            flow_dict['std_dev_piat'] = np.std(flow_dict['iats'])
        # Convert epoch timestamps to human-readable UTC last, after
        # the numeric duration computation above:
        flow_dict['flowStart'] = utc_stamp_to_utc(flow_dict['flowStart'])
        flow_dict['flowEnd'] = utc_stamp_to_utc(flow_dict['flowEnd'])
        return flow_dict

    def stats(self):
        """
        Log the stats for flows
        """
        self.logger.info("Result statistics")
        self.logger.info("-----------------")
        self.logger.info("Flow Records: %s", len(self.flow_cache))
        self.logger.info("Additional Archived Flow Records: %s",
                         len(self.flow_archive))
        self.logger.info("Ignored Packets: %s", self.packets_ignored)
        self.logger.info("Processed Packets: %s", self.packets_processed)
        self.logger.info("founded host or url: %s", self.host_recorded)
