# https://docs.cloudera.com/documentation/enterprise/6/6.3/topics/cm_dg_impala_queries.html
# https://cloudera.github.io/cm_api/apidocs/v6/path__clusters_-clusterName-_services_-serviceName-_impalaQueries.html
# https://archive.cloudera.com/cm6/6.3.1/generic/jar/cm_api/swagger-html-sdk-docs/python/docs/ImpalaQueriesResourceApi.html
from datetime import datetime, timedelta

import pandas as pd
from dateutil.parser import parse, ParserError
from sqlalchemy.orm import Session

from app.bigdata.models.impala_query_info import ImpalaQueryInfo
from app.bigdata.handlers.cm_handler import CM6
from app.bigdata.utils import time_calc, collection, unit_convert
from core.logger import logger
from common.utils import is_empty

from setting import config


class ImpalaHandler(CM6):
    """Cloudera Manager handler for fetching Impala query history via the CM REST API (v17)."""

    def __init__(self):
        super().__init__()

    def api_v17_init(self):
        """Build the base URL for the CM API v17 endpoint from the connection settings on `self`."""
        api_url = f"{self.protocol}://{self.host}:{self.port}/api/v17"
        return api_url

    def get_impala_queries(self, cluster_name, service_name='impala', filter='', from_time=None, to_time=None, offset=0, limit=1000):
        """
        Fetch Impala query records from Cloudera Manager.

        :param cluster_name: CM cluster name, e.g. 'Cluster 1'
        :param service_name: CM service name (defaults to 'impala')
        :param filter: CM filter expression (shadows the builtin `filter`; name kept
                       for backward compatibility with existing keyword callers)
        :param from_time:  window start, e.g. '2023-01-11 17:20:09' (defaults to 5 minutes ago)
        :param to_time:    window end, e.g. '2023-01-11 17:25:09' (defaults to now)
        :param offset: pagination offset
        :param limit: maximum number of queries to return
        :return: list of query objects from the API response, or [] on any error
        """
        api_url = self.api_v17_init()
        try:
            # NOTE(review): a stray `self.cm_client.EventsResourceApi()` call whose
            # result was discarded has been removed — it had no observable effect.
            api_instance = self.cm_client.ImpalaQueriesResourceApi(self.cm_client.ApiClient(api_url))
            if not from_time or not to_time:
                # Default window: the last five minutes.
                now = datetime.now()
                from_time = (now - timedelta(minutes=5))
                to_time = now
            else:
                try:
                    from_time, to_time = parse(from_time), parse(to_time)
                except (TypeError, ParserError) as parse_err:
                    # Log the actual failure; the original logged the ParserError class itself.
                    logger.error(f"get impala queries response => {parse_err}")
                    return []
            impala_queries_response = api_instance.get_impala_queries(cluster_name, service_name, filter=filter, _from=from_time,
                                                                      limit=limit, offset=offset, to=to_time)
            logger.info(f"get impala queries response => amount: {len(impala_queries_response.queries)}")
            if impala_queries_response.warnings:
                # `warnings` reports time ranges CM could not cover (lost data).
                logger.warning(f"get impala queries response => lost data time: {impala_queries_response.warnings}")
        except Exception as e:
            logger.error(f"get impala queries response => {e}")
            return []
        return impala_queries_response.queries


def get_impala_queries_duration_millis_top(db: Session, start_datatime: str, end_datatime: str, top: int = 10):
    """Top-N Impala queries by duration within a datetime range, as (columns, rows).

    :param db: SQLAlchemy session
    :param start_datatime: range start, e.g. '2023-01-11 00:00:00'
    :param end_datatime: range end
    :param top: number of rows to return
    :return: tuple (column_names, rows); both empty lists when no data or on error
    """
    try:
        result = ImpalaQueryInfo.get_duration_millis_top_by_range_datetime(db, start_datatime, end_datatime, top)
        if not result:
            return [], []
        data = [{"user": item['user'], "duration_millis": time_calc(item['duration_millis']),
                 "memory_aggregate_peak": unit_convert(item['memory_aggregate_peak']),
                 "estimated_per_node_peak_memory": unit_convert(item['estimated_per_node_peak_memory']),
                 "thread_cpu_time": time_calc(item['thread_cpu_time']), "start_time": item['start_time'],
                 "end_time": item['end_time'],
                 # HTML anchor linking to the SQL detail endpoint; rendered downstream.
                 "statement": f"<a href='{config.OPS_HTTP_URL}/ops/api/v1/bigdata/impala/sql?query_id={item['query_id']}'>详情信息</a>"} for
                item in result]
        # Every row dict has the same keys in the same insertion order, so derive the
        # 'split'-style (columns, data) pair directly instead of round-tripping
        # through a pandas DataFrame — same output, no intermediate copy.
        column = list(data[0].keys())
        content = [list(row.values()) for row in data]
        return column, content
    except Exception as e:
        logger.error(f"duration_millis_report_handler => {e}")
        return [], []


def get_impala_queries_memory_aggregate(db: Session, start_datatime: str, end_datatime: str, top: int = 10):
    """Top-N Impala queries by aggregate peak memory within a datetime range, as (columns, rows).

    :param db: SQLAlchemy session
    :param start_datatime: range start, e.g. '2023-01-11 00:00:00'
    :param end_datatime: range end
    :param top: number of rows to return
    :return: tuple (column_names, rows); both empty lists when no data or on error
    """
    try:
        result = ImpalaQueryInfo.get_memory_aggregate_top_by_range_datetime(db, start_datatime, end_datatime, top)
        if not result:
            return [], []
        data = [{"user": item['user'], "duration_millis": time_calc(item['duration_millis']),
                 "memory_aggregate_peak": unit_convert(item['memory_aggregate_peak']),
                 "estimated_per_node_peak_memory": unit_convert(item['estimated_per_node_peak_memory']),
                 "thread_cpu_time": time_calc(item['thread_cpu_time']), "start_time": item['start_time'],
                 "end_time": item['end_time'],
                 # HTML anchor linking to the SQL detail endpoint; rendered downstream.
                 "statement": f"<a href='{config.OPS_HTTP_URL}/ops/api/v1/bigdata/impala/sql?query_id={item['query_id']}'>详情信息</a>"} for
                item in result]
        # Uniform row keys: build the 'split'-style (columns, data) pair directly
        # rather than constructing a throwaway pandas DataFrame.
        column = list(data[0].keys())
        content = [list(row.values()) for row in data]
        return column, content
    except Exception as e:
        logger.error(f"memory_aggregate_report_handler => {e}")
        return [], []


def get_impala_queries_memory_estimated(db: Session, start_datatime: str, end_datatime: str, top: int = 10):
    """Top-N Impala queries by estimated per-node peak memory within a datetime range, as (columns, rows).

    :param db: SQLAlchemy session
    :param start_datatime: range start, e.g. '2023-01-11 00:00:00'
    :param end_datatime: range end
    :param top: number of rows to return
    :return: tuple (column_names, rows); both empty lists when no data or on error
    """
    try:
        result = ImpalaQueryInfo.get_memory_estimated_top_by_range_datetime(db, start_datatime, end_datatime, top)
        if not result:
            return [], []
        data = [{"user": item['user'], "duration_millis": time_calc(item['duration_millis']),
                 "memory_aggregate_peak": unit_convert(item['memory_aggregate_peak']),
                 "estimated_per_node_peak_memory": unit_convert(item['estimated_per_node_peak_memory']),
                 "thread_cpu_time": time_calc(item['thread_cpu_time']), "start_time": item['start_time'],
                 "end_time": item['end_time'],
                 # HTML anchor linking to the SQL detail endpoint; rendered downstream.
                 "statement": f"<a href='{config.OPS_HTTP_URL}/ops/api/v1/bigdata/impala/sql?query_id={item['query_id']}'>详情信息</a>"} for
                item in result]
        # Uniform row keys: build the 'split'-style (columns, data) pair directly
        # rather than constructing a throwaway pandas DataFrame.
        column = list(data[0].keys())
        content = [list(row.values()) for row in data]
        return column, content
    except Exception as e:
        logger.error(f"memory_estimated_report_handler => {e}")
        return [], []


def get_impala_queries_thread_cpu_time(db: Session, start_datatime: str, end_datatime: str, top: int = 10):
    """Top-N Impala queries by thread CPU time within a datetime range, as (columns, rows).

    :param db: SQLAlchemy session
    :param start_datatime: range start, e.g. '2023-01-11 00:00:00'
    :param end_datatime: range end
    :param top: number of rows to return
    :return: tuple (column_names, rows); both empty lists when no data or on error
    """
    try:
        # NOTE: 'tread' is a typo in the model-layer method name; kept as-is since
        # it is the project's existing API.
        result = ImpalaQueryInfo.get_tread_cpu_time_top_by_range_datetime(db, start_datatime, end_datatime, top)
        if not result:
            return [], []
        data = [{"user": item['user'], "duration_millis": time_calc(item['duration_millis']),
                 "memory_aggregate_peak": unit_convert(item['memory_aggregate_peak']),
                 "estimated_per_node_peak_memory": unit_convert(item['estimated_per_node_peak_memory']),
                 "thread_cpu_time": time_calc(item['thread_cpu_time']), "start_time": item['start_time'],
                 "end_time": item['end_time'],
                 # HTML anchor linking to the SQL detail endpoint; rendered downstream.
                 "statement": f"<a href='{config.OPS_HTTP_URL}/ops/api/v1/bigdata/impala/sql?query_id={item['query_id']}'>详情信息</a>"} for
                item in result]
        # Uniform row keys: build the 'split'-style (columns, data) pair directly
        # rather than constructing a throwaway pandas DataFrame.
        column = list(data[0].keys())
        content = [list(row.values()) for row in data]
        return column, content
    except Exception as e:
        logger.error(f"thread_cpu_time_report_handler => {e}")
        return [], []


def impala_queries_report_handler(begin_time, end_time):
    """Collect filtered Impala query records from 'Cluster 1' for the periodic report.

    Thin wrapper around `impala_queries_handler`: the original duplicated that
    function's per-item normalization loop verbatim, so this now delegates to the
    single implementation with a fixed cluster name and statement filter.

    :param begin_time: window start, e.g. '2023-01-11 17:20:09'
    :param end_time: window end
    :return: list of normalized query dicts, or [] on error / no data
    """
    # Only statements that read or mutate data/metadata are of interest to the report.
    statement_filter = ('statement RLIKE "^create.*" OR statement RLIKE "^select.*from.*" OR '
                        'statement RLIKE "^insert.*" OR statement RLIKE "^upsert.*" OR '
                        'statement RLIKE "^update.*" OR statement RLIKE "^delete.*" OR '
                        'statement RLIKE "^drop.*" OR statement RLIKE "^refresh.*" OR '
                        'statement RLIKE "^INVALIDATE METADATA.*" OR statement RLIKE "^COMPUTE STATS.*"')
    return impala_queries_handler('Cluster 1', begin_time, end_time, filter=statement_filter)


def _normalize_query_item(item):
    """Flatten one Impala query response dict into a row dict for persistence.

    The raw statement is popped out of the nested response copy and surfaced as a
    top-level field; empty/missing numeric attributes default to 0; a missing
    end_time stays None. `collection(..., dtype='cst_str')` converts the API
    timestamps to CST strings (presumably — confirm against utils.collection).
    """
    attributes = item['attributes']
    # Remove the (potentially large) statement from the stored raw response;
    # the walrus still binds `statement` (to None/'' ) when it is missing.
    if statement := item.get('statement'):
        del item['statement']

    def _num(value):
        # Empty/missing numeric attribute => 0, matching the original defaults.
        return 0 if is_empty(value) else value

    end = None
    if not is_empty(item.get('end_time')):
        end = collection(item.get('end_time'), cst=False, dtype='cst_str')
    # Hoisted: the original converted start_time twice for start_time/create_date.
    start = collection(item.get('start_time'), cst=False, dtype='cst_str')
    return {'query_id': item.get('query_id'), 'user': item.get('user'), 'statement': statement,
            'duration_millis': _num(item.get('duration_millis')), 'query_state': item.get('query_state'),
            'query_status': attributes.get('query_status'),
            'memory_aggregate_peak': _num(attributes.get('memory_aggregate_peak')),
            'estimated_per_node_peak_memory': _num(attributes.get('estimated_per_node_peak_memory')),
            'thread_cpu_time': _num(attributes.get('thread_cpu_time')),
            'oom': attributes.get("oom"), 'start_time': start,
            'end_time': end, 'create_date': start.split(' ')[0],
            'query_response': item}


def impala_queries_handler(cluster_name, begin_time, end_time, filter):
    """Fetch Impala queries for a cluster/time window and normalize each record.

    :param cluster_name: CM cluster name, e.g. 'Cluster 1'
    :param begin_time: window start, e.g. '2023-01-11 17:20:09'
    :param end_time: window end
    :param filter: CM filter expression (shadows the builtin; name kept for callers)
    :return: list of normalized query dicts, or [] on error / no data
    """
    # Log tag fixed: the original said "report handler" (copy-paste from the sibling).
    logger.info(f"impala queries handler => datetime_range: {begin_time}-{end_time}")
    try:
        impala_handler = ImpalaHandler()
        items = impala_handler.get_impala_queries(cluster_name, from_time=begin_time, to_time=end_time, filter=filter)
        if not items:
            return []
        return [_normalize_query_item(item.to_dict()) for item in items]
    except Exception as e:
        logger.error(f"impala queries handler => {e}")
        return []


if __name__ == '__main__':
    # Ad-hoc smoke test: fetch queries from the last five minutes whose statement
    # contains count(distinct ...). Removed from the original: a duplicate
    # `from datetime import datetime, timedelta` (already imported at module top),
    # an unused SessionLocal() session (opened a DB connection that was never used
    # or closed), an unused `time_interval_ranges` import, and a stale
    # commented-out CSV-export experiment.
    now = datetime.now()
    start = (now - timedelta(minutes=5)).strftime('%Y-%m-%d %H:%M:%S')
    end = now.strftime('%Y-%m-%d %H:%M:%S')
    result = impala_queries_handler('Cluster 1', start, end, filter='statement RLIKE ".*count\\(distinct.*"')
    print(result)
