#!/usr/bin/env python
# coding=utf-8
import argparse
import signal
import sys
import time
import json
import copy
import traceback

from common.log import init, info, warn, debug, error
from common.rabbitmq_interface import RabbitmqService, pika
from common.utils import byteify
from common.aliyun_product_operation_interface import ALiYunDiskOperation
import aliyun_disk_db_model
from aliyun_disk_db_model import AliyunDiskDBExecutor
from common.setting import Cmop2ParametersAdapter, ConfigureDiscovery

# Default log file and configuration file names for this service.
ALIYUN_SERVER_LOG = "aliyun_disk.log"
ALIYUN_SERVER_CONF = "service.conf"
# Exchange carrying compute events and their results.
COMPUTE_EXCHANGE = "vulcanus-iaas-computer"
# Exchange receiving access/log/task-report messages.
LOG_EXCHANGE = "vulcanus-iaas-log"
# Routing key used when publishing task reports to the log exchange.
TASK_ROUTE_KEY = "vulcanus.task.create"

def _signal_handler(sig, stack):
    """Log the shutdown and terminate the process cleanly."""
    info("System exit.")
    sys.exit(0)

def task_report(req, repo, route_key):
    """Publish one task-report message per request id to the log exchange.

    NOTE(review): assumes ``repo`` carries taskCode/username/statusCode and
    infos.requestId (a string or a list of strings) -- confirm against the
    adapter output format.
    """
    service = RabbitmqService()
    report = {
        "taskCode": repo["taskCode"],
        "action": route_key,
        "username": repo["username"],
        "status": repo["statusCode"],
        "requestData": req,
        "responseData": json.dumps(repo),
    }
    # Normalize to a list so a single id and a batch follow one code path.
    request_ids = repo["infos"]["requestId"]
    if not isinstance(request_ids, list):
        request_ids = [request_ids]
    for request_id in request_ids:
        report["requestId"] = request_id
        service.publish(exchange=LOG_EXCHANGE, routing_key=TASK_ROUTE_KEY,
                        message=json.dumps(report))

def rpc_query_common(func):
    """Decorator turning a query handler into a RabbitMQ RPC consumer.

    The wrapper decodes the message body, adapts the parameters from the
    CMOP2 format, runs ``func``, replies on the caller's private queue
    (``properties.reply_to``) echoing the correlation id, and finally
    publishes a task report to the log exchange.
    """
    def wrapper(method, properties, body):
        info("[x] Received %r" % (body))
        info("[x] Begin to do %s ..." % (method.routing_key))
        service = RabbitmqService()
        adapter = Cmop2ParametersAdapter()
        # Decode and convert the request to the current parameter format.
        request_info = adapter.input_adapt(json.loads(body, object_hook=byteify))
        response_info = adapter.output_adapt(func(**request_info))
        # Answer on the caller's reply queue with the correlation id echoed.
        service.publish(exchange='', routing_key=properties.reply_to,
                        properties=pika.BasicProperties(correlation_id=properties.correlation_id),
                        message=json.dumps(response_info))
        # Task report for the log pipeline.
        task_report(body, response_info, "%s.result" % (method.routing_key))
    return wrapper

@rpc_query_common
def rpc_query_disks(ak, sk, region_id, **kwargs):
    """List disks in the given region via the Aliyun DescribeDisks API."""
    operation = ALiYunDiskOperation(ak, sk, region_id)
    return {"Infos": operation.DescribeDisks(**kwargs)}

def rpc_process(ch, method, properties, body):
    """Dispatch an incoming RPC request to its handler and always ack it."""
    callbacks = {
        "vulcanus.disk.query.list": rpc_query_disks
    }
    # Tell the log exchange the rpc request was accepted.
    ch.basic_publish(exchange=LOG_EXCHANGE, routing_key="result.access.successful",
                     body="Accept rpc request successful!")

    handler = callbacks.get(method.routing_key)
    if handler is None:
        ch.basic_publish(exchange=LOG_EXCHANGE, routing_key="result.access.failure",
                         body="There is no callback to process request %s." % (method.routing_key))
    else:
        handler(method, properties, body)
        # Tell the log exchange the rpc request completed.
        ch.basic_publish(exchange=LOG_EXCHANGE, routing_key="result.access.successful",
                         body="Process %s request complete!" % (method.routing_key))

    ch.basic_ack(delivery_tag=method.delivery_tag)

def event_process_common(func):
    """Decorator for disk event handlers.

    The wrapper decodes the message body, adapts the parameters from the
    CMOP2 format, runs ``func``, publishes the result on the compute
    exchange under ``<routing_key>.result`` and then publishes a task
    report to the log exchange.
    """
    def wrapper(method, properties, body):
        info("[x] Received %r" % (body))
        info("[x] Begin to do %s ..." % (method.routing_key))
        service = RabbitmqService()
        adapter = Cmop2ParametersAdapter()
        # Decode and convert the request to the current parameter format.
        request_info = adapter.input_adapt(json.loads(body, object_hook=byteify))
        response_info = adapter.output_adapt(func(**request_info))
        result_key = "%s.result" % (method.routing_key)
        # Publish the execution result back onto the compute exchange.
        service.publish(exchange=COMPUTE_EXCHANGE, routing_key=result_key,
                        message=json.dumps(response_info))
        # And a task report for the log pipeline.
        task_report(body, response_info, result_key)
    return wrapper

def _fill_disk_info(ak, sk, region_id, infos, **kwargs):
    """Poll DescribeDisks until a disk appears and copy its attributes.

    Up to four attempts are made, sleeping 10s between misses.  The first
    disk of the response is taken (assumed to be the system disk when the
    instance was just created).  Its attributes are copied into ``infos``
    with a lower-cased first letter, and the dict of that disk -- extended
    with the response's RequestId/StatusCode/Message -- is returned.
    """
    result = {}
    operation = ALiYunDiskOperation(ak, sk, region_id)
    last_ret = None
    for _attempt in range(1, 5):
        last_ret = operation.DescribeDisks(**kwargs)
        disk_list = last_ret["Disks"]["Disk"]
        if disk_list:
            # Assume the first item is the system disk; right after instance
            # creation there is only one disk.
            result = disk_list[0]
            for key, value in result.items():
                infos["%s%s" % (key[0].lower(), key[1:])] = value
            break
        warn("There is no disk information!")
        time.sleep(10)
    if last_ret:
        result["RequestId"] = last_ret["RequestId"]
        result["StatusCode"] = last_ret["StatusCode"]
        result["Message"] = last_ret["Message"]
    return result

@event_process_common
def event_create_disk(ak, sk, region_id, **kwargs):
    """Create a data disk (or register an existing system disk) in the DB.

    When kwargs["DiskType"] != "system", CreateDisk is called and the new
    disk is polled by its DiskId.  For a system disk no API create happens
    (the disk comes with the instance); it is looked up via the InstanceId
    instead.  On StatusCode "200" the matching fields are inserted into the
    local DB through AliyunDiskDBExecutor.
    """
    aliy_op = ALiYunDiskOperation(ak, sk, region_id)
    aliy_db = AliyunDiskDBExecutor()

    # Template row for the DB insert; only keys present here are persisted.
    created_disk_info = {"TaskCode": "", "InstanceId": "", "RegionId": region_id, "ZoneId": "", "DiskId": "",
                         "DiskName": "", "Description": "", "DiskCategory": "", "DiskType": "",
                         "DiskChargeType": "PostPaid", "Size": 40, "DeleteWithInstance": True,
                         "DeleteAutoSnapshot": True, "EnableAutoSnapshot": False, "Status": "Available",
                         "Portable": False, "SecurityEnhancementStrategy": "Deactive"}
    # A disk id needs to be passed in (translated from the original comment).
    ret = byteify(aliy_op.CreateDisk(**kwargs)) if kwargs["DiskType"] != "system" else {"StatusCode":"200",
                                                                                        "Message":""}
    repo = dict(kwargs, **ret)
    repo["Infos"] = {}
    if "DiskId" in ret:
        # Newly created data disk: poll it by id and merge its attributes.
        # NOTE(review): disk_info["Type"] raises KeyError if polling never
        # found the disk -- confirm whether that is intended to fail loudly.
        disk_info = _fill_disk_info(ak, sk, region_id, repo["Infos"], DiskIds=str([ret["DiskId"]]))
        repo["DiskType"] = disk_info["Type"]
        repo = dict(repo, **disk_info)
    elif kwargs["DiskType"] == "system":
        # System disk: look it up through its instance instead.
        disk_info = _fill_disk_info(ak, sk, region_id, repo["Infos"], InstanceId=kwargs["InstanceId"])
        repo = dict(repo, **disk_info)

    if repo["StatusCode"] == "200":
        # Copy every response field that the DB row template knows about.
        for k, v in repo.items():
            if k in created_disk_info:
                created_disk_info[k] = v

        aliy_db.insert(**created_disk_info)
    else:
        error("Failed to create disk due to %s." % (repo["Message"]))

    return repo

@event_process_common
def event_delete_disk(ak, sk, region_id, **kwargs):
    """Delete a disk on Aliyun and drop its local DB record on success."""
    operation = ALiYunDiskOperation(ak, sk, region_id)
    # Only call DeleteDisk when the disk still exists remotely.
    describe_ret = operation.DescribeDisks(DiskIds=str([kwargs["DiskId"]]))
    if describe_ret["Disks"]["Disk"]:
        ret = operation.DeleteDisk(**kwargs)
    else:
        # Already gone: report success, keeping the describe request id.
        ret = {"StatusCode": "200", "Message": "",
               "RequestId": describe_ret["RequestId"]}
    repo = dict(kwargs, **ret)

    if repo["StatusCode"] == "200":
        # Remove the local record for the deleted disk.
        AliyunDiskDBExecutor().delete(DiskId=kwargs["DiskId"])

    return repo

def _event_modify_common(op_func, update_keys, db_func=None, **kwargs):
    repo = {}
    kwargs_keys = set(kwargs.keys())
    k_update_keys = update_keys & kwargs_keys
    if k_update_keys:
        update_dict = {}
        for update_key in k_update_keys:
            update_dict[update_key] = kwargs[update_key]
        ret = op_func(**kwargs)
        if "Filters" not in kwargs:
            kwargs["Filters"] = {"DiskId": kwargs["DiskId"]}

        if db_func:
            db_func(update_dict=update_dict, **kwargs["Filters"])

        repo = dict(kwargs, **ret)
        if "Infos" not in repo:
            repo["Infos"] = {}

        for k, v in update_dict.items():
            nk = "%s%s" % (k[0].lower(), k[1:])
            repo["Infos"][nk] = v
    else:
        repo["StatusCode"] = "Error"
        repo["Message"] = "Failed to get any parameters to update (%s)." % (op_func.__name__)
        repo["Infos"] = {}
        repo["Infos"]["requestId"] = ""
        repo = dict(repo, **kwargs)

    return repo

def _update_diskattribute_result(update_dict, **kwargs):
    """Persist disk-attribute changes; kwargs are the row filter columns."""
    executor = AliyunDiskDBExecutor()
    executor.update(update_dict=update_dict, **kwargs)

@event_process_common
def event_modify_diskattribute(ak, sk, region_id, **kwargs):
    """Modify mutable disk attributes and mirror the change into the DB."""
    mutable_keys = {"DiskName", "Description", "DeleteWithInstance",
                    "DeleteAutoSnapshot", "EnableAutoSnapshot"}
    operation = ALiYunDiskOperation(ak, sk, region_id)
    return _event_modify_common(operation.ModifyDiskAttribute, mutable_keys,
                                _update_diskattribute_result, **kwargs)

def _event_operation_common(op_func, **kwargs):
    """Run an Aliyun operation and merge its reply over the request args."""
    result = byteify(op_func(**kwargs))
    repo = dict(kwargs, **result)
    # Guarantee an Infos container for downstream consumers.
    repo.setdefault("Infos", {})
    return repo

@event_process_common
def event_attach_disk(ak, sk, region_id, **kwargs):
    """Attach a disk to an instance and keep the local DB in sync.

    If the disk already has a local record, AttachDisk is called and the
    record is updated with the new instance relationship.  If it has no
    record, no AttachDisk call is made -- the code only describes the
    instance's disks and inserts a fresh record.
    NOTE(review): the no-record branch presumably covers disks attached
    outside this service -- confirm whether skipping AttachDisk there is
    intentional.
    """
    repo = {}
    aliy_op = ALiYunDiskOperation(ak, sk, region_id)
    aliy_db = AliyunDiskDBExecutor()
    # need to record attach relationship between instance and disk
    ret = aliy_db.query(DiskId=kwargs["DiskId"])
    if ret:
        repo = _event_operation_common(aliy_op.AttachDisk, **kwargs)
        if repo["StatusCode"] == "200":
            # Record the new instance<->disk relationship in the DB.
            filter_dict = {"DiskId": kwargs["DiskId"]}
            update_dict = {"InstanceId": kwargs["InstanceId"],
                           "DiskId": kwargs["DiskId"],
                           "DeleteWithInstance": kwargs["DeleteWithInstance"]}
            try:
                aliy_db.update(update_dict=update_dict, **filter_dict)
                if "Infos" not in repo:
                    repo["Infos"] = {}
                # Mirror the updated columns into Infos, first letter lowered.
                for k, v in update_dict.items():
                    nk = "%s%s" % (k[0].lower(), k[1:])
                    repo["Infos"][nk] = v

            except Exception as ex:
                error("Failed to update related table for attach operation due to %s." % (ex))
                error(traceback.format_exc())
        else:
            error("Failed to attach disk due to %s." % (repo["Message"]))
    else:
        # create new attach relationship
        # Template row for the DB insert; only keys present here are kept.
        created_disk_info = {"TaskCode": "", "InstanceId": "", "RegionId": region_id, "ZoneId": "", "DiskId": "",
                             "DiskName": "", "Description": "", "DiskCategory": "", "DiskType": "",
                             "DiskChargeType": "PostPaid", "Size": 40, "DeleteWithInstance": True,
                             "DeleteAutoSnapshot": True, "EnableAutoSnapshot": False, "Status": "Available",
                             "Portable": False, "SecurityEnhancementStrategy": "Deactive"}

        ret = aliy_op.DescribeDisks(InstanceId=kwargs["InstanceId"])
        repo = dict(repo, **kwargs)
        repo = dict(repo, **ret)
        try:
            # Request arguments first, then the live attributes of the
            # matching disk override them.
            for k, v in kwargs.items():
                if k in created_disk_info:
                    created_disk_info[k] = v

            for disk in ret["Disks"]["Disk"]:
                if disk["DiskId"] == kwargs["DiskId"]:
                    for k, v in disk.items():
                        if k in created_disk_info:
                            created_disk_info[k] = v

            aliy_db.insert(**created_disk_info)

        except Exception as ex:
            error("Failed to create a new attach relationship due to %s." % (ex))
            error(traceback.format_exc())

    return repo

@event_process_common
def event_detach_disk(ak, sk, region_id, **kwargs):
    """Detach a disk from an instance and keep the local DB in sync.

    If the disk exists and is attached to the given instance, DetachDisk
    is called; if it exists but is not attached to that instance, the
    event succeeds as a no-op.  Either way a successful status clears the
    InstanceId on the local record.  If the disk no longer exists on
    Aliyun, the local record is deleted instead.
    """
    repo = {}
    aliy_op = ALiYunDiskOperation(ak, sk, region_id)
    aliy_db = AliyunDiskDBExecutor()
    # need to query if disk is existed
    disks = [kwargs["DiskId"]]
    disk_ret = aliy_op.DescribeDisks(DiskIds=str(disks))
    if disk_ret["Disks"]["Disk"]:

        # Not empty: only detach if the disk is currently attached to the
        # requested instance (we only get one disk info here).
        if disk_ret["Disks"]["Disk"][0]["InstanceId"] and \
                        disk_ret["Disks"]["Disk"][0]["InstanceId"] == kwargs["InstanceId"]:
            repo = _event_operation_common(aliy_op.DetachDisk, **kwargs)
        else:
            # Already detached (or attached elsewhere): succeed as a no-op.
            repo = dict(repo, **disk_ret)
            repo["StatusCode"] = "200"
            repo["Message"] = "Disk has already been detached from instance."
            repo = dict(repo, **kwargs)

        if repo["StatusCode"] == "200":
            # Update DB to detach disk and instance.
            filter_dict = {"DiskId": kwargs["DiskId"]}
            update_dict = {"InstanceId": "", "DiskId": kwargs["DiskId"]}
            try:
                aliy_db.update(update_dict=update_dict, **filter_dict)
                if "Infos" not in repo:
                    repo["Infos"] = {}

                # Mirror the updated columns into Infos, first letter lowered.
                for k, v in update_dict.items():
                    nk = "%s%s" % (k[0].lower(), k[1:])
                    repo["Infos"][nk] = v

            except Exception as ex:
                error("Failed to update related table for detach operation due to %s." % (ex))
                error(traceback.format_exc())

    else:
        repo = dict(repo, **kwargs)
        repo = dict(repo, **disk_ret)
        try:
            # Disk is gone on the Aliyun side: just delete the local record.
            aliy_db.delete(DiskId=kwargs["DiskId"])
            repo["Message"] = "The disk does not exist, detach ignored."
        except Exception as ex:
            repo["Message"] = str(ex)
            error("Failed to delete related records for %s due to %s." % (kwargs["DiskId"], ex))
            error(traceback.format_exc())

    # The attach relationship between instance and disk is now cleared.
    return repo

@event_process_common
def event_reinit_disk(ak, sk, region_id, **kwargs):
    """Re-initialize a disk through the Aliyun ReInitDisk API."""
    operation = ALiYunDiskOperation(ak, sk, region_id)
    return _event_operation_common(operation.ReInitDisk, **kwargs)

@event_process_common
def event_reset_disk(ak, sk, region_id, **kwargs):
    """Reset a disk to a snapshot through the Aliyun ResetDisk API."""
    operation = ALiYunDiskOperation(ak, sk, region_id)
    return _event_operation_common(operation.ResetDisk, **kwargs)

@event_process_common
def event_replacesystem_disk(ak, sk, region_id, **kwargs):
    """Replace an instance's system disk and sync the DB record."""
    operation = ALiYunDiskOperation(ak, sk, region_id)
    repo = _event_operation_common(operation.ReplaceSystemDisk, **kwargs)
    if repo["StatusCode"] != "200":
        error("Failed to replace system disk due to %s." % (repo["Message"]))
        return repo

    # Point the existing system-disk row at the newly created disk id.
    update_dict = {"DiskId": repo["DiskId"], "Size": kwargs["SystemDiskSize"],
                   "SecurityEnhancementStrategy": kwargs["SecurityEnhancementStrategy"],
                   "DiskName": "", "Description": ""}
    try:
        AliyunDiskDBExecutor().update(update_dict=update_dict,
                                      InstanceId=kwargs["InstanceId"],
                                      DiskType="system")
        infos = repo.setdefault("Infos", {})
        # Mirror the updated columns into Infos, first letter lowered.
        for key, value in update_dict.items():
            infos["%s%s" % (key[0].lower(), key[1:])] = value
    except Exception as ex:
        error("Failed to update related table for replace system operation due to %s." % (ex))
        error(traceback.format_exc())

    return repo

@event_process_common
def event_resize_disk(ak, sk, region_id, **kwargs):
    """Resize a disk and sync the new size into the local DB record."""
    operation = ALiYunDiskOperation(ak, sk, region_id)
    repo = _event_operation_common(operation.ResizeDisk, **kwargs)
    if repo["StatusCode"] != "200":
        error("Failed to resize disk due to %s." % (repo["Message"]))
        return repo

    # Record the new size against the disk's row.
    update_dict = {"Size": kwargs["NewSize"]}
    try:
        AliyunDiskDBExecutor().update(update_dict=update_dict,
                                      DiskId=kwargs["DiskId"])
        infos = repo.setdefault("Infos", {})
        # Mirror the updated columns into Infos, first letter lowered.
        for key, value in update_dict.items():
            infos["%s%s" % (key[0].lower(), key[1:])] = value
    except Exception as ex:
        error("Failed to update related table for resize disk operation due to %s." % (ex))
        error(traceback.format_exc())

    return repo

def instance_event_process(ch, method, properties, body):
    """Dispatch a disk event message to its handler and always ack it.

    Access, completion and failure notices are published to the log
    exchange; handler exceptions are logged and reported, never raised.
    """
    callbacks = {
        "vulcanus.disk.create": event_create_disk,
        "vulcanus.disk.delete": event_delete_disk,
        "vulcanus.disk.modify.attribute": event_modify_diskattribute,
        "vulcanus.disk.operation.attach": event_attach_disk,
        "vulcanus.disk.operation.detach": event_detach_disk,
        "vulcanus.disk.operation.reinit": event_reinit_disk,
        "vulcanus.disk.operation.reset": event_reset_disk,
        "vulcanus.disk.operation.replacesystem": event_replacesystem_disk,
        "vulcanus.disk.operation.resize": event_resize_disk
    }

    ch.basic_publish(exchange=LOG_EXCHANGE, routing_key="result.access.successful",
                     body="Accept %s request successful!" % (method.routing_key))

    handler = callbacks.get(method.routing_key)
    if handler is None:
        ch.basic_publish(exchange=LOG_EXCHANGE, routing_key="result.access.failure",
                         body="There is no callback to process request %s." % (method.routing_key))
    else:
        try:
            handler(method, properties, body)
            # Tell the log exchange the event completed.
            ch.basic_publish(exchange=LOG_EXCHANGE, routing_key="result.access.successful",
                             body="Process %s request complete!" % (method.routing_key))
        except Exception as ex:
            msg = "There are some exceptions happened due to %s." % (str(ex))
            error(traceback.format_exc())
            error(msg)
            ch.basic_publish(exchange=LOG_EXCHANGE, routing_key="result.access.failure",
                             body=msg)

    ch.basic_ack(delivery_tag=method.delivery_tag)

def start_rabbitmq_mode(args):
    """Connect to RabbitMQ, declare exchanges/queues and consume forever.

    Binds one queue for disk event messages and one for rpc queries, both
    on the compute exchange, then enters the consuming loop.
    """
    service = RabbitmqService(args.rabbitmq_host, args.rabbitmq_port,
                              args.rabbitmq_user, args.rabbitmq_password)

    # Prefetch 1: an unacked message blocks further deliveries to us.
    service.channel_create(prefetch_count=1)

    # Declare both exchanges used by this worker.
    service.exchange_declare(exchange=COMPUTE_EXCHANGE, type="topic", durable=True)
    service.exchange_declare(exchange=LOG_EXCHANGE, type="fanout", durable=True)

    # Event queue: one binding per disk event routing key.
    event_keys = ["vulcanus.disk.create", "vulcanus.disk.delete",
                  "vulcanus.disk.modify.attribute", "vulcanus.disk.operation.attach",
                  "vulcanus.disk.operation.detach", "vulcanus.disk.operation.reinit",
                  "vulcanus.disk.operation.reset", "vulcanus.disk.operation.replacesystem",
                  "vulcanus.disk.operation.resize"]
    service.queue_bind(COMPUTE_EXCHANGE, route_keys=event_keys,
                       queue_name="aliyun_disk_service", durable=True)
    service.regist_consume("aliyun_disk_service", instance_event_process)

    # RPC queue for disk queries.
    service.queue_bind(COMPUTE_EXCHANGE, route_keys=["vulcanus.disk.query.list"],
                       queue_name="aliyun_disk_rpc", durable=True)
    service.regist_consume("aliyun_disk_rpc", rpc_process)

    # Start consuming and loop until interrupted.
    service.start_consuming()
    signal.signal(signal.SIGINT, _signal_handler)
    service.consuming_loop()

def argument_parser():
    parser = argparse.ArgumentParser(description="Aliyun ECS Operation CLI.")

    # create actions subcommand
    sparsers = parser.add_subparsers(title="Actions", description="Actions of the service.",
                                     help="Action name, use -h after it for more details.")
    # start rabbitmq server
    sparser_rabbitmq_conn = sparsers.add_parser("start-rabbitmq-to-connect",
                                                help="Start as a worker service for API Server events exchange. Note: if you have multiple events to confim and public plse use config file.")
    sparser_rabbitmq_conn.set_defaults(func=start_rabbitmq_mode)

    sparser_rabbitmq_conn.add_argument("-H", "--host", action="store", type=str, dest="rabbitmq_host",
                                       help="Rabbitmq server host to connect.", required=False, default="localhost")
    sparser_rabbitmq_conn.add_argument("-p", "--port", action="store", type=int, dest="rabbitmq_port",
                                       help="Rabbitmq server port to connect.", required=False, default=5672)
    sparser_rabbitmq_conn.add_argument("-u", "--user", action="store", type=str, dest="rabbitmq_user",
                                       help="Rabbitmq server user to connect.", required=False, default="guest")
    sparser_rabbitmq_conn.add_argument("-P", "--password", action="store", type=str, dest="rabbitmq_password",
                                       help="Rabbitmq server password to connect.", required=False, default="guest")
    sparser_rabbitmq_conn.add_argument("-c", "--config", action="store", type=str, dest="conf_path",
                                       help="Server config file.", required=False, default=ALIYUN_SERVER_CONF)
    sparser_rabbitmq_conn.add_argument("-d", "--daemon", action="store", type=bool, dest="daemon",
                                       help="Server daemonized flag.", required=False, default=False)
    sparser_rabbitmq_conn.add_argument("-l", "--log-path", action="store", type=str, dest="log_path",
                                       help="Server log path.", required=False, default=ALIYUN_SERVER_LOG)

    return parser.parse_args()

if __name__ == "__main__":
    args = argument_parser()
    # Initialise logging before anything else writes log lines.
    init(logpath=args.log_path)
    # Wire the configuration observers: parameter adapters and the DB model
    # get notified when the watched config file changes.
    cd = ConfigureDiscovery()
    cmop2_adapter = Cmop2ParametersAdapter()
    cd.regist_observer("input_mapping", cmop2_adapter.input_notify)
    cd.regist_observer("output_mapping", cmop2_adapter.output_notify)
    cd.regist_observer("db", aliyun_disk_db_model.notify)
    cd.start_observe_conf(conf_file=args.conf_path)
    cd.notify()
    # Dispatch to the selected subcommand (args.func set by argparse).
    args.func(args)
