# https://docs.cloudera.com/documentation/enterprise/6/6.3/topics/cm_dg_yarn_applications.html
# https://cloudera.github.io/cm_api/apidocs/v6/path__clusters_-clusterName-_services_-serviceName-_yarnApplications.html
# https://archive.cloudera.com/cm6/6.3.1/generic/jar/cm_api/swagger-html-sdk-docs/python/docs/YarnApplicationsResourceApi.html
from datetime import datetime, timedelta

import pandas as pd
from dateutil.parser import parse, ParserError
from sqlalchemy.orm import Session

from app.bigdata.models.yarn_job_info import YarnJobInfo
from app.bigdata.handlers.cm_handler import CM6
from app.bigdata.utils import time_calc, collection, unit_convert
from core.logger import logger


class YarnHandler(CM6):
    """Thin wrapper around the Cloudera Manager v17 YARN-applications API."""

    def __init__(self):
        super().__init__()

    def get_yarn_jobs(self, cluster_name, service_name='yarn', from_time=None, to_time=None, filter='', offset=0, limit=1000):
        """Fetch YARN applications from Cloudera Manager for a time window.

        :param cluster_name: CM cluster name, e.g. 'Cluster 1'
        :param service_name: CM service name (default 'yarn')
        :param from_time:  window start, e.g. '2023-01-11 17:20:09'
        :param to_time:    window end,   e.g. '2023-01-11 17:25:09'
        :param filter: CM filter expression (parameter name kept for backward
            compatibility with keyword callers, even though it shadows the builtin)
        :param offset: pagination offset
        :param limit: maximum number of applications to return
        :return: list of application objects, or [] on any failure
        """
        api_url = self.api_v17_init()
        try:
            api_instance = self.cm_client.YarnApplicationsResourceApi(self.cm_client.ApiClient(api_url))
            if not from_time or not to_time:
                # If either bound is missing, fall back to the last 5 minutes.
                now = datetime.now()
                from_time = now - timedelta(minutes=5)
                to_time = now
            else:
                try:
                    from_time, to_time = parse(from_time), parse(to_time)
                except (TypeError, ParserError) as e:
                    # Bug fix: log the caught exception instance, not the
                    # ParserError class object.
                    logger.error(f"get yarn jobs response => {e}")
                    return []
            yarn_jobs_response = api_instance.get_yarn_applications(cluster_name, service_name, _from=from_time, to=to_time, filter=filter,
                                                                    limit=limit, offset=offset)
            logger.info(f"get yarn jobs response => amount: {len(yarn_jobs_response.applications)}")
            if yarn_jobs_response.warnings:
                # CM reports time ranges for which application data was lost.
                logger.warning(f"get yarn jobs response => lost data time: {yarn_jobs_response.warnings}")
        except Exception as e:
            logger.error(f"get yarn jobs response => {e}")
            return []
        return yarn_jobs_response.applications


def yarn_jobs_report_handler(begin_date, end_date):
    """Collect flattened YARN job records for the given time window.

    :param begin_date: window start, e.g. '2023-02-20 00:00:00'
    :param end_date: window end, e.g. '2023-02-20 23:59:59'
    :return: list of flat dicts (one per application), [] when there is no
        data or on any error
    """
    logger.info(f"yarn jobs report handler => datetime_range: {begin_date}-{end_date}")
    try:
        data = []
        yarn_handler = YarnHandler()
        # Flink streaming jobs are long-running; exclude them from the batch report.
        items = yarn_handler.get_yarn_jobs(cluster_name='Cluster 1', service_name='yarn', from_time=begin_date, to_time=end_date,
                                           filter="application_type != 'Apache Flink'", limit=1000, offset=0)
        if not items:
            return []

        for item in items:
            item = item.to_dict()
            # Robustness fix: 'attributes' may be absent or None for some
            # applications; the previous item['attributes'] raised KeyError
            # and aborted the whole report.
            attributes = item.get('attributes') or {}
            end = None
            if item.get('end_time'):
                # Still-running jobs have no end_time; leave it as None.
                end = collection(item.get('end_time'), cst=False, dtype='cst_str')
            data.append({'application_id': item.get('application_id'), 'user': item.get('user'), 'resource_pool': item.get('pool'),
                         'hive_query_id': attributes.get('hive_query_id'),
                         'hive_query_string': attributes.get('hive_query_string'),
                         'cpu_milliseconds': attributes.get("cpu_milliseconds"), 'state': item.get("state"),
                         'start_time': collection(item.get('start_time'), cst=False, dtype='cst_str'),
                         'end_time': end, 'job_response': item})
    except Exception as e:
        logger.error(f"yarn jobs report handler => {e}")
        return []

    return data


def get_yarn_job_duration_top(date: str, db: Session, top: int):
    """Return (columns, rows) for the top-N longest-running YARN jobs on *date*.

    :param date: report date string
    :param db: SQLAlchemy session
    :param top: number of rows to return
    :return: (column-name list, row list); ([], []) when there is no data
        or on any error
    """
    try:
        items = YarnJobInfo.get_duration_top_by_date(date, db, top)
        if not items:
            return [], []

        # Project each DB row onto the report columns. Using .get uniformly
        # keeps a missing optional field (e.g. hive_query_id) as None instead
        # of raising KeyError and discarding the whole result set.
        fields = ("application_id", "resource_pool", "user", "hive_query_id",
                  "duration", "cpu_milliseconds", "start_time", "end_time")
        records = [{field: item.get(field) for field in fields} for item in items]
        split = pd.DataFrame(records).to_dict(orient='split')
        return split['columns'], split['data']
    except Exception as e:
        logger.error(f"get_yarn_job_duration_top => {e}")
        return [], []


def get_yarn_job_cpu_top(date: str, db: Session, top: int):
    """Return (columns, rows) for the top-N CPU-consuming YARN jobs on *date*.

    :param date: report date string
    :param db: SQLAlchemy session
    :param top: number of rows to return
    :return: (column-name list, row list); ([], []) when there is no data
        or on any error
    """
    try:
        items = YarnJobInfo.get_cpu_milliseconds_top_by_date(date, db, top)
        if not items:
            return [], []

        # Dead commented-out duration/cpu formatting code removed (it also
        # contained a copy-paste bug assigning duration from cpu_milliseconds).
        # Project each DB row onto the report columns; .get keeps missing
        # optional fields as None instead of raising KeyError.
        fields = ("application_id", "resource_pool", "user", "hive_query_id",
                  "duration", "cpu_milliseconds", "start_time", "end_time")
        records = [{field: item.get(field) for field in fields} for item in items]
        split = pd.DataFrame(records).to_dict(orient='split')
        return split['columns'], split['data']
    except Exception as e:
        logger.error(f"get_yarn_job_cpu_top => {e}")
        return [], []


if __name__ == '__main__':
    # Ad-hoc smoke test: dump one day's worth of YARN job records.
    # (The previous now/begin_time/end_time locals were computed but never
    # used — the call below passes hard-coded dates — so they were removed
    # along with the dead commented-out example code.)
    from pprint import pprint

    pprint(yarn_jobs_report_handler("2023-02-20 00:00:00", "2023-02-20 23:59:59"))
