# coding=utf-8
import json
from datetime import datetime, timedelta

from flask import jsonify

from blues.cloud import bp
from blues.cloud.base import request_asapi
from database2 import db_exec_many
from model.c_department import find_all_depart
from model.c_disk import find_disk_info_by_id_type


def record_cms_ecs_disk() -> bool:
    """
    Collect yesterday's ECS disk-usage metrics from CloudMonitor and batch-insert
    them into the ``cms_ecs_disk`` table, one row per (instance, partition).

    The ``diskusage_utilization`` metric only carries a usage percentage, not the
    capacity. Example datapoints for the three partitions of one ECS instance:
    {'timestamp': 1697126385000, 'userId': '1200473597202969', 'instanceId': 'i-2xl01brbtt2iecgg8oij', 'device': '/dev/vdc', 'diskname': '/storage', 'hostname': 0.0, 'Average': 0.63, 'Maximum': 0.63, 'Minimum': 0.63}
    {'timestamp': 1697126385000, 'userId': '1200473597202969', 'instanceId': 'i-2xl01brbtt2iecgg8oij', 'device': '/dev/vda1', 'diskname': '/', 'hostname': 0.0, 'Average': 34.15, 'Maximum': 34.15, 'Minimum': 34.15}
    {'timestamp': 1697126385000, 'userId': '1200473597202969', 'instanceId': 'i-2xl01brbtt2iecgg8oij', 'device': '/dev/vdb', 'diskname': '/data/vdb', 'hostname': 0.0, 'Average': 0.02, 'Maximum': 0.02, 'Minimum': 0.02}

    TODO: to compute the actual used bytes we look up the disk size in the
    c_disk_info table, but the mount point (``device``) cannot be mapped to a
    specific cloud disk, so only the *first* data disk is used — the numbers are
    approximate. Most ECS instances mount a single data disk, so the impact is
    small for now; revisit later.

    :return: True when every department was processed and the batch insert
             succeeded, False on any API or DB failure.
    """
    # Timestamp of the last monitoring sample of yesterday.
    end_time = (datetime.today() - timedelta(days=1)).strftime('%Y-%m-%d 23:59:59')

    # 1. For the private cloud, fetch monitoring data per department using that
    #    department's own AK/SK credentials.
    depart_list = find_all_depart()
    # Rows accumulated across all departments for one batch insert at the end.
    values_list = []
    for depart in depart_list:
        cloud_id = depart['cloud_id']
        depart_id = depart['department']
        depart_name = depart['department_name']
        # Credentials scoped to this department's organisation.
        depart_ak = depart['access_key_id']
        depart_sk = depart['access_key_secret']

        # Fetch the latest datapoints via the ASAPI gateway.
        params = {
            "Namespace": "acs_ecs_dashboard",
            "MetricName": "diskusage_utilization",
            "EndTime": end_time,
        }
        asapi_data = request_asapi(cloud_id, "Cms", "2019-01-01", "DescribeMetricLast", params, depart_ak, depart_sk)
        if not asapi_data:
            # Abort the whole run on the first failed API call; nothing
            # collected so far is persisted.
            return False
        data_points = json.loads(asapi_data['Datapoints'])

        for point in data_points:
            # Skip k8s-mounted volumes (longhorn).
            if "longhorn" in point['device']:
                continue
            # Base row built from the CMS datapoint.
            tmp_tuple = (
                point['instanceId'], cloud_id, depart_id, depart_name, point['device'], point['Maximum'], end_time, datetime.now())
            # Classify the partition: Linux system disks are /dev/vda1 and
            # Windows system disks are C:\ ; everything else is a data disk.
            # BUGFIX: exact membership test — the previous substring check
            # (`device not in "/dev/vda1 C:\\"`) misclassified any device whose
            # name happened to be a substring, e.g. "/dev/vda" or "C:".
            disk_type = "system" if point['device'] in ("/dev/vda1", "C:\\") else "data"
            # TODO: query disk info for the disk size; there is no direct
            # mapping between the monitored mount point and a cloud disk.
            disk_info = find_disk_info_by_id_type(point['instanceId'], disk_type)
            # No disk record found in the DB: silently drop this datapoint.
            if not disk_info:
                continue
            tmp_tuple = tmp_tuple + (disk_info['disk_id'], disk_info['size'])
            values_list.append(tmp_tuple)

    # Batch-insert everything collected above.
    table = 'cms_ecs_disk'
    fields = "instance_id,cloud_id,department,department_name,device,disk_usage,date,record_time,disk_id,disk_size"
    db_success = db_exec_many(table, fields, values_list)
    if not db_success:
        return False
    return True


@bp.route('/api/v1/cloud/record_cms_ecs_disk', methods=['POST'])
def record_cms_ecs_disk_api():
    """HTTP endpoint: trigger the ECS disk-usage collection job.

    Returns ``{'code': 200}`` on success, ``{'code': 500}`` on failure.
    """
    status = 200 if record_cms_ecs_disk() else 500
    return jsonify({'code': status})


# BUGFIX: was '__mian__' (typo), so the guard could never fire and the script
# did nothing when executed directly.
if __name__ == '__main__':
    record_cms_ecs_disk()
