#!/usr/bin/env python2
#-*- coding: utf-8 -*-

import json

from Ump import defs
from Ump.objs.db import models
from Ump.objs.session_wrapper import enable_log_and_session, _sw
from Ump.jsonify import sqlalchemy_obj_to_dict

# Column names for every tuple in config_kv below; each tuple is zipped
# against this to build a SysconfigForUMP row (see init_sysconfig_with_group).
# NOTE(review): 'is_update_schduel_job' is misspelled ('schedule'), but it is
# a persisted key/column name -- do not rename it without a data migration.
keys = ('class_name', 'name', 'category', 'value_setting', 'unit', 'is_update_schduel_job', 'description')

# Default system-configuration values, grouped by setting group.
# Tuple layout (matches `keys`):
#   (class_name, name, category, value_setting, unit,
#    is_update_schduel_job, description)
# `class_name` is the job class used when a ScheduleJob is created for the
# group; groups with a blank class name (" ") never become schedule jobs.
# `description` looks like an i18n message key, not display text.
config_kv = {
    'latency': [
        ('AlertJob', 'latency.threshold', 'monitor', '800', 'microseconds', False, 'latency.threshold_description'),
        ('AlertJob', 'latency.enabled', 'monitor', 'True', None, True, 'latency.enabled_description'),
        ('AlertJob', 'latency.interval', 'monitor', '60', 'seconds', True, 'latency.interval_description')
        ],
    'volume_number': [
        ('AlertJob', "volume_number.threshold", "monitor", "100", "number", False, "volume_number.threshold_description"),
        ('AlertJob', "volume_number.enabled", "monitor", 'True', None, True, "volume_number.enabled_description"),
        ('AlertJob', "volume_number.interval", "monitor", "60", "seconds", True, "volume_number.interval_description"),
    ],
    'disk_capacity': [
        ('AlertJob', "disk_capacity.threshold", "monitor", "80", "percent", False, "disk_capacity.threshold_description"),
        ('AlertJob', "disk_capacity.enabled", "monitor", 'True', None, True, "disk_capacity.enabled_description"),
        ('AlertJob', "disk_capacity.interval", "monitor", "60", "seconds", True, "disk_capacity.interval_description"),
    ],
    'CPU_usage': [
        ('AlertJob', "CPU_usage.threshold", "monitor", "80", "percent", False, "CPU_usage.threshold_description"),
        ('AlertJob', "CPU_usage.enabled", "monitor", 'True', None, True, "CPU_usage.enabled_description"),
        ('AlertJob', "CPU_usage.interval", "monitor", "60", "seconds", True, "CPU_usage.interval_description"),
    ],
    'memory_usage': [
        ('AlertJob', "memory_usage.threshold", "monitor", "80", "percent", False, "memory_usage.threshold_description"),
        ('AlertJob', "memory_usage.enabled",  "monitor", 'True', None, True, "memory_usage.enabled_description"),
        ('AlertJob', "memory_usage.interval", "monitor", "60", "seconds", True, "memory_usage.interval_description"),
    ],
    'network_bandwidth': [
        ('AlertJob', "network_bandwidth.threshold", "monitor", "80", "percent", False, "network_bandwidth.threshold_description"),
        ('AlertJob', "network_bandwidth.enabled", "monitor", 'True', None, True, "network_bandwidth.enabled_description"),
        ('AlertJob', "network_bandwidth.interval", "monitor", "60", "seconds", True, "network_bandwidth.interval_description"),
    ],
    'oplog_export': [
        ("OplogJob", "oplog_export.interval", "Logging", "86400", "seconds", True, "oplog_export.interval_description"),
        ("OplogJob", "oplog_export.enabled", "Logging", 'False', None, True, "oplog_export.enabled_description"),
        ("OplogJob", "oplog_export.start_date", "Logging", "", None, True, "oplog_export.start_date_description"),
        ("OplogJob", "oplog_export.path", "Logging", "/home", None, True, "oplog_export.path_description"),
    ],
    'mail_receive': [
        (" ", "mail_receive.list", "mail_warning", " ", " ", False, "mail_receive.list_description"),
    ],
    'data_recover': [
         ("RecoverJob", "data_recover.qos_limit", "datarecover", "104857600", "Bps", False, "dc_qoslimit_description"),
         ("RecoverJob", "data_recover.interval", "datarecover", "30", "minutes",  True, "dc_interval_description"),
    ],
    'volume_delete': [
        (" ", "volume_delete.period", "volume_delete", "7", "days", False, "volume_delete.period_description"),
    ],
}


# Config groups that are seeded on FusionNAS builds; all other groups are
# skipped there (see main()).
FUSION_NAS_CONFIG = ['mail_receive', 'memory_usage', 'CPU_usage', 'disk_capacity']
# NOTE(review): not referenced anywhere in this file -- presumably consumed
# by an importer; confirm before removing.
COLUMNS_FOR_SCHEDULE_JOB = ['enabled', 'interval']


def parse_split(s, sep_char=' '):
    """Split *s* on *sep_char* and return the stripped, non-empty pieces."""
    stripped = (piece.strip() for piece in s.split(sep_char))
    return [piece for piece in stripped if piece]


def parse_name(s, sep_char='.'):
    """Split a dotted setting name (e.g. 'latency.interval') into its parts.

    Same contract as parse_split, with '.' as the default separator:
    every part is stripped and empty parts are dropped.
    """
    return [part.strip() for part in s.split(sep_char) if part.strip()]


def update_interval(job_kv, sysconfig_kv, column):
    """Copy interval settings from a sysconfig row into a schedule-job dict.

    When *column* is 'interval', stores the sysconfig's 'value_setting' as
    'every' and its 'unit' as 'period' in *job_kv* (mutated in place).
    Any other column leaves *job_kv* untouched.

    Returns *job_kv* for chaining.
    Raises ValueError when an interval row is missing its value or unit.
    """
    if column == 'interval':
        every = sysconfig_kv.get('value_setting')
        period = sysconfig_kv.get('unit')
        # Raise instead of assert: asserts are stripped under `python -O`,
        # which would silently store None into the schedule job.
        if not every or not period:
            raise ValueError('interval sysconfig needs value_setting and unit, got %r' % (sysconfig_kv,))
        job_kv['every'] = every
        job_kv['period'] = period
    return job_kv


def update_enabled(job_kv, sysconfig_kv, column):
    """Copy the enabled flag from a sysconfig row into a schedule-job dict.

    When *column* is 'enabled', stores a boolean under job_kv['enabled']:
    False for an explicit false marker ('false'/'False'/False), True for
    anything else (including a missing value). Returns *job_kv*.
    """
    if column != 'enabled':
        return job_kv
    raw = sysconfig_kv.get('value_setting')
    job_kv[column] = raw not in ('false', 'False', False)
    return job_kv


def update_default_settings(job_kv):
    """Stamp the job dict with the defaults every schedule job shares."""
    job_kv.update(schedule_type='interval')
    return job_kv


def init_sysconfig_with_group(group_name, group_list):
    """Persist one SysconfigForUMP row per tuple in *group_list*.

    Each tuple is zipped against the module-level `keys`, tagged with
    *group_name*, and saved -- unless a row with the same name already
    exists (existing rows are left untouched).
    """
    for row in group_list:
        record = dict(zip(keys, row))
        record['group_name'] = group_name
        exists = _sw.get_one(models.SysconfigForUMP,
                             id_or_spec={'name': record.get('name')})
        if not exists:
            models.SysconfigForUMP(record).save()

    
def do_init_schedule_job(group_name):
    """Create the single ScheduleJob row for *group_name*.

    Walks every SysconfigForUMP row of the group, folding each setting
    (interval, enabled, ...) into one job dict, then saves one ScheduleJob.

    Raises ValueError when the group's config declares no class_name.
    """
    sysconfig_groups = _sw.get_list(models.SysconfigForUMP,
                                    spec={'group_name': group_name})
    # Guard empty groups: the original saved a half-initialised job
    # ({'group_name': ...} with no class_name/context) when no rows existed.
    if not sysconfig_groups:
        return

    # class_name is a property of the group, not of individual rows --
    # hoisted out of the loop (it was recomputed per row). Raise instead of
    # assert so the check survives `python -O`.
    class_name = dict(zip(keys, config_kv[group_name][0])).get('class_name')
    if not class_name:
        raise ValueError('no class_name configured for group %r' % group_name)

    job_kv = {'group_name': group_name, 'class_name': class_name}

    for sysconfig in sysconfig_groups:
        # Setting names are '<job_name>.<column>' -- exactly two parts.
        name, column = parse_name(sysconfig.name)
        sysconfig_kv = sqlalchemy_obj_to_dict(sysconfig)

        job_kv['context'] = json.dumps({'job_name': name})

        job_kv = update_interval(job_kv, sysconfig_kv, column)
        job_kv = update_enabled(job_kv, sysconfig_kv, column)
        job_kv = update_default_settings(job_kv)

    models.ScheduleJob(job_kv).save()


def init_schedule_job():
    """Create a ScheduleJob for every sysconfig group that backs a job.

    Skips stand-alone settings that have no job class, and groups that
    already have a ScheduleJob row.
    """
    # Settings that must never spawn a schedule job.
    # BUG FIX: this list said 'data_recover.qos.limit', but the setting is
    # named 'data_recover.qos_limit' (see config_kv), so the skip never
    # matched that row.
    non_schedulable = ('data_recover.qos_limit',
                       'mail_receive.list',
                       'volume_delete.period')

    for sysconfig_for_ump in _sw.get_list(models.SysconfigForUMP):
        # Cheap name check first, DB lookup second.
        if sysconfig_for_ump.name in non_schedulable:
            continue

        group_name = sysconfig_for_ump.group_name
        if _sw.get_one(models.ScheduleJob, id_or_spec={'group_name': group_name}):
            continue

        do_init_schedule_job(group_name)
        

def main():
    """Seed the default sysconfig rows (filtered on FusionNAS builds),
    then create the corresponding schedule jobs."""
    for group_name, group_list in config_kv.iteritems():
        # FusionNAS builds only expose a subset of the config groups.
        skip_on_fusionnas = (defs.is_fusionnas
                             and group_name not in FUSION_NAS_CONFIG)
        if skip_on_fusionnas:
            continue
        init_sysconfig_with_group(group_name, group_list)

    init_schedule_job()

# Script entry point: seed default sysconfig rows and their schedule jobs.
if __name__ == '__main__':
    main()
