#!/usr/bin/python2
# -*- coding: utf-8 -*-


import threading
import time
import logging
import os
import sys

from eventlet.queue import Empty
from ryu.ofproto import ofproto_v1_3, ofproto_v1_3_parser

# from Queue import Queue
from eventlet import Queue,sleep

from dependency import DependencyGraph,DependencyNode,Operation


class Executor():
    """Applies queued flow-table updates to OpenFlow switches in dependency order.

    Consumes ``DependencyGraph`` objects from ``update_queue``, translates each
    graph node's ``Operation`` into an OpenFlow 1.3 FlowMod, and uses switch
    replies delivered on ``notify_queue`` (OFPFlowStatsReply / OFPFlowRemoved)
    to confirm completion before releasing dependent operations.

    NOTE: this file targets Python 2 (see shebang) — the code relies on
    ``filter`` returning a list and on py2 ``dict.items()`` returning a list.
    """

    def __init__(self, update_queue,notify_queue, dps, topo, time_list):
        """Store collaborators; performs no I/O.

        update_queue -- eventlet Queue producing DependencyGraph instances.
        notify_queue -- eventlet Queue producing OpenFlow reply messages that
                        acknowledge previously issued operations.
        dps          -- mapping dpid -> ryu Datapath used to send messages.
        topo         -- topology object (stored but never read in this class).
        time_list    -- shared list that accumulates per-update timing samples.
        """
        # super(Executor, self).__init__()
        assert isinstance(update_queue, Queue)
        assert isinstance(notify_queue, Queue)
        self.__update_queue = update_queue
        self.__dps = dps
        self.__notify_queue = notify_queue
        # Total number of individual operations executed across all graphs.
        self.__exec_count = 0
        self.__topo = topo
        # When True, drain the queue and merge all pending graphs into one
        # before executing (batching optimization); disabled by default.
        self.__merge = False
        self.time_list = time_list

    def append_data(self,v):
        """Record one per-update consume-time sample in the shared time_list."""
        self.time_list.append({"type":"seq","value":v})

    def run(self):
        """Main executor loop: apply each queued dependency graph to the network.

        For every graph: repeatedly execute all nodes whose dependencies are
        satisfied, then block on notify_queue for a switch confirmation and
        release the matching node. Records wall-clock consume time per update.
        Never returns normally; exits the process via os._exit(0) if the
        update queue ever raises Empty.
        """
        start_time = None
        exit_command = False
        update_count1 = 0
        exec_count1 = 0
        update_consume_time = []
        while True:
            print("exec_count",self.__exec_count)
            if start_time is not None:
                print("time_count", time.time() - start_time)
            try:
                # NOTE(review): get() with no timeout blocks forever, so the
                # Empty handler below is unreachable as written; the TODO
                # suggests get(timeout=10) was intended here — confirm.
                g = self.__update_queue.get()  # TODO for test, wait 10s
            except Empty:
                # Queue drained (only reachable with a timeout): report the
                # average latency and hard-exit the whole process.
                print("average consume time=%s" % (sum(update_consume_time) / len(update_consume_time)))
                sys.stdout.flush()
                os._exit(0)

            # Start the experiment clock at the first received update.
            if start_time is None:
                start_time = time.time()

            assert isinstance(g,DependencyGraph)

            if self.__merge:
                # Batch mode: drain everything currently queued and merge all
                # graphs into a single combined graph before executing.
                glist = [g]
                while not self.__update_queue.empty():
                    tmp = self.__update_queue.get()
                    glist.append(tmp)
                g = self.__merge_list(glist)
                update_count1+=len(glist)
                merge_num = len(glist)
            else:
                update_count1+=1
                merge_num = 1
            exec_count1+=len(g.nodes)

            # Nodes whose FlowMod has been sent but not yet acknowledged.
            wait_for_complete_list = []

            # t1 = time.time()
            while True:
                # Runnable nodes: all dependencies released and not yet started.
                # (py2 filter returns a list, so len() below is valid.)
                ns = filter(lambda n: len(n.acquire)==0 and n.status==DependencyNode.INIT, g.nodes)
                if len(ns)==0 and len(wait_for_complete_list)==0:
                    break
                for n in ns:
                    self.__exec_op(n.op)
                    self.__exec_count+=1
                    n.status = DependencyNode.WAIT
                    wait_for_complete_list.append(n)
                # time.sleep(0.5)
                # sleep(0.1)
                # for n in ns:
                #     n.status = DependencyNode.EXECED
                #     for nr in n.release:
                #         nr.acquire.remove(n)
                # Block for one switch acknowledgement, then release the node
                # it confirms so its dependents become runnable next pass.
                msg = self.__notify_queue.get()
                self.__release_one(wait_for_complete_list,msg)
            # t2 = time.time()
            # print("graph exec time",t2 - t1)
            # Latency from graph creation (g.timestamp) to full installation.
            update_consume_t = time.time()-g.timestamp
            print("update consume time=%s"%update_consume_t)
            update_consume_time.append(update_consume_t)

            self.append_data(update_consume_t)

            # print("update_count=%d, exec_count=%d, time=%s, merge_num=%d" % (update_count1, exec_count1,t2-start_time,merge_num))

            # Any node still INIT after the loop could never acquire all its
            # dependencies — the graph contained an unsatisfiable (cyclic?)
            # dependency.
            ns = filter(lambda n: n.status == DependencyNode.INIT, g.nodes)
            if len(ns)!=0:
                logging.error("unresolvable dependency")

    def __merge_list(self,glist):
        """Merge several pending DependencyGraphs into one combined graph.

        Graphs are grouped by their (ip1, ip2) endpoint pair. Multiple updates
        for the same pair collapse into a single graph built from the first
        graph's path1 (presumably the oldest installed path) to the last
        graph's path2 (the newest target path) — TODO confirm path1/path2
        old-vs-new semantics against DependencyGraph.construct.
        """
        assert len(glist)>0
        partition_map = dict()

        # Group graphs by endpoint pair, preserving arrival order per pair.
        for g in glist:
            key = (g.ip1,g.ip2)
            val = partition_map.get(key)
            if val is None:
                partition_map[key]=[g]
            else:
                val.append(g)

        # Collapse each group to one graph. Reassigning values while iterating
        # is safe here: py2 items() snapshots to a list and no keys change.
        for key,val in partition_map.items():
            if len(val)<=1:
                partition_map[key] = val[0]
            else:
                partition_map[key] = DependencyGraph.construct(
                    val[0].path1,
                    val[-1].path2,
                    key[0],
                    key[1]
                )
        return DependencyGraph.merge_different_ippair(partition_map.values())

    def __release_one(self,nodes,msg):
        """Match an OpenFlow reply to a waiting node and release it.

        nodes -- wait_for_complete_list; the confirmed node is removed from it
                 (mutation during iteration is safe only because each loop
                 breaks immediately after remove()).
        msg   -- OFPFlowStatsReply (confirms ADD/MOD via a stats request sent
                 in __exec_op) or OFPFlowRemoved (confirms DELETE, enabled by
                 the OFPFF_SEND_FLOW_REM flag on the FlowMod).
        Unmatched messages are silently ignored.
        """
        from ryu.ofproto.ofproto_v1_3_parser import OFPFlowStatsReply
        from ryu.ofproto.ofproto_v1_3_parser import OFPFlowRemoved
        of = ofproto_v1_3
        ofp = ofproto_v1_3_parser
        Match = ofp.OFPMatch
        ActionOutput = ofp.OFPActionOutput
        if isinstance(msg,OFPFlowStatsReply):
            dpid = msg.datapath.id
            for flow in msg.body:
                for n in nodes:
                    op = n.op
                    opm = Match(**op.match)
                    # NOTE(review): buf is never read afterwards; serialize()
                    # looks like it is called only for its side effect of
                    # populating the match's internal fields before the
                    # items() comparison below — confirm against ryu OFPMatch.
                    buf = bytearray()
                    opm.serialize(buf,0)
                    f_inst = flow.instructions
                    # A node is confirmed when the reported flow entry is on
                    # the same switch, has an identical match, and carries
                    # exactly one ApplyActions instruction with a single
                    # output action to the expected port.
                    if (op.dpid == dpid
                        and opm.items() == flow.match.items()
                        and len(f_inst)==1):
                        f_acs = f_inst[0].actions
                        if len(f_acs)==1:
                            ac = f_acs[0]
                            if isinstance(ac,ActionOutput):
                                if ac.port == op.action["output"][0]:
                                    self.__remove_dependency(n)
                                    nodes.remove(n)
                                    break
        elif isinstance(msg,OFPFlowRemoved):
            dpid = msg.datapath.id
            for n in nodes:
                op = n.op
                opm = Match(**op.match)
                # NOTE(review): same unused-buf serialize pattern as above.
                buf = bytearray()
                opm.serialize(buf, 0)
                # A removal is confirmed by switch id + exact match equality.
                if (op.dpid == dpid
                    and opm.items() == msg.match.items()):
                    self.__remove_dependency(n)
                    nodes.remove(n)
                    break

    def __remove_dependency(self,n):
        """Mark node n executed and release it from its dependents.

        Raises if n was not in WAIT state or if the dependency graph's
        acquire/release links are inconsistent — both indicate a logic error
        upstream rather than a recoverable condition.
        """
        if n.status != DependencyNode.WAIT:
            raise Exception("executed")
        n.status = DependencyNode.EXECED
        for nr in n.release:
            if n not in nr.acquire:
                raise Exception("not in nr.acquire")
            nr.acquire.remove(n)

    def __exec_op(self, op):
        """Send the FlowMod for one Operation to its switch.

        ADD/MOD: installs/modifies the flow and additionally sends an
        OFPFlowStatsRequest whose reply lets __release_one confirm the entry
        landed. DELETE_STRICT relies on OFPFF_SEND_FLOW_REM instead: the
        switch emits OFPFlowRemoved when the entry is gone.

        Only "output" actions are supported; anything else raises.
        """
        of = ofproto_v1_3
        ofp = ofproto_v1_3_parser
        Match = ofp.OFPMatch
        ActionOutput = ofp.OFPActionOutput

        assert isinstance(op,Operation)

        match = Match(**op.match)
        if "output" in op.action:
            action = [ActionOutput(port) for port in op.action['output']]
        else:
            raise Exception("unsupport now")
        dp = self.__dps[op.dpid]
        if op.type == Operation.ADD:
            command = of.OFPFC_ADD
        elif op.type == Operation.MOD:
            command = of.OFPFC_MODIFY
        elif op.type == Operation.DEL:
            command = of.OFPFC_DELETE_STRICT
        else:
            raise Exception("unexpected operation type")
        inst = None
        if op.type != Operation.DEL:
            inst = [ofp.OFPInstructionActions(of.OFPIT_APPLY_ACTIONS,action)]
        msg = ofp.OFPFlowMod(
            datapath=dp,
            command=command,
            priority=op.priority,
            match=match,
            instructions=inst,
            out_port = of.OFPP_ANY,
            out_group =  of.OFPG_ANY,
            flags = of.OFPFF_SEND_FLOW_REM
        )
        # print(msg)
        dp.send_msg(msg)

        # Deletions are confirmed by OFPFlowRemoved; no stats probe needed.
        if op.type == Operation.DEL:
            return

        # Probe table 0 for the just-installed match; the OFPFlowStatsReply
        # drives confirmation in __release_one.
        req = ofp.OFPFlowStatsRequest(dp,0,
                                table_id = 0,
                                out_port = of.OFPP_ANY,
                                out_group = of.OFPG_ANY,
                                match=match)
        # print(req)
        dp.send_msg(req)