import pyhdfs
import pandas as pd
from app.bigdata.handlers.cm_handler import CM6

from app.bigdata.models.hive_table_info import HiveTableInfo
from sqlalchemy.orm import Session
from app.bigdata.utils import unit_convert
from core.logger import logger


class HiveHandler(CM6):
    """Collects per-table storage usage from the Hive warehouse via WebHDFS."""

    def __init__(self):
        super(HiveHandler, self).__init__()
        # WebHDFS client, or False when initialisation failed
        # (callers do truthiness checks on self.fs).
        self.fs = self.fs_init()

    def fs_init(self, hosts='header01.big.db.coltd:9870,header02.big.db.coltd:9870', user_name='root'):
        """Create the HDFS client.

        :param hosts: comma-separated WebHDFS namenode ``host:port`` pairs.
        :param user_name: HDFS user to act as.
        :return: the client on success; False on failure (kept instead of
                 None so existing ``if not self.fs`` checks keep working).
        """
        try:
            self.fs = pyhdfs.HdfsClient(hosts=hosts, user_name=user_name)
            return self.fs
        except Exception as e:
            # Was logger.info: a failed client init is an error, log it as one.
            logger.error(f"fs_init error =>{e}")
            return False

    def get_tables_info(self, hive_warehouse_path='/user/hive/warehouse/'):
        """Walk the warehouse tree and report every table that holds data.

        :param hive_warehouse_path: HDFS root of the Hive warehouse.
        :return: list of dicts with ``database_name``, ``table_name``,
                 ``raw_capacity`` (bytes), ``capacity`` (human readable via
                 unit_convert) and ``hdfs_path``. Empty list when the client
                 is unavailable or the path does not exist.
        """
        result = []
        if not self.fs:
            logger.error("pyhdfs init error => self.fs is not available")
            return result
        if not self.fs.exists(hive_warehouse_path):
            logger.error(f"get_tables_info error =>{hive_warehouse_path} is not exists")
            return result
        # Normalise the root once so every join has exactly one slash. The
        # original built hdfs_path as f"{hive_warehouse_path}{database}/..."
        # which broke ('.../warehousedb/...') whenever the caller's path had
        # no trailing slash; output is unchanged for the default argument.
        base = hive_warehouse_path.rstrip('/')
        for database in self.fs.listdir(hive_warehouse_path):
            database_path = f"{base}/{database}"
            # Skip databases that consume no space at all.
            if self.fs.get_content_summary(database_path).spaceConsumed:
                for table in self.fs.listdir(database_path):
                    table_path = f"{database_path}/{table}"
                    if self.fs.exists(table_path):
                        # Only record tables that actually hold data.
                        if capacity := self.fs.get_content_summary(table_path).spaceConsumed:
                            result.append(
                                {"database_name": database, "table_name": table,
                                 "raw_capacity": capacity,
                                 "capacity": unit_convert(capacity),
                                 "hdfs_path": table_path})
        return result


def latest_hive_db_data(start_time: str, end_time: str, db: Session):
    """Fetch per-database capacity rows within the date range.

    :param start_time: inclusive range start, passed through to the model query.
    :param end_time: inclusive range end.
    :param db: SQLAlchemy session.
    :return: list of ``{'database_name', 'capacity', 'date'}`` dicts
             (capacity human-readable via unit_convert); [] when no rows.
    """
    data = HiveTableInfo.get_db_data_by_date_range(start_time, end_time, db)
    if not data:
        # Was f"... {len(data)}": len(None) raised TypeError when the query
        # returned None instead of an empty list. The branch only runs when
        # data is falsy, so the amount is always 0 here.
        logger.warning("get hive data sql => amount: 0")
        return []
    return [
        {'database_name': item['database_name'],
         'capacity': unit_convert(item['raw_capacity']),
         'date': item['create_date']}
        for item in data
    ]


def latest_dbs_table_data(start_time: str, end_time: str, db: Session):
    """Return table rows for every database in the time range, or False when empty."""
    rows = HiveTableInfo.get_dbs_table_data_by_time_range(start_time, end_time, db)
    return rows if rows else False


def latest_db_table_data(table_name: str, start_time: str, end_time: str, db: Session):
    """Return rows for a single table in the date range, or False when empty."""
    rows = HiveTableInfo.get_db_table_by_date_range(table_name, start_time, end_time, db)
    return rows if rows else False


def hive_capacity_report_handler(start_time: str, end_time: str, db: Session):
    """Pivot per-database capacity rows into report header + rows.

    :param start_time: inclusive range start (forwarded to latest_hive_db_data).
    :param end_time: inclusive range end.
    :param db: SQLAlchemy session.
    :return: (title, content) where title is ['database_name', <date>, ...]
             and content holds one row per database; ([], []) when there is
             no data or on any error.
    """
    try:
        result = latest_hive_db_data(start_time, end_time, db)
        if not result:
            return [], []
        # database_name -> {date: capacity}. The original if/else branches
        # were identical apart from creating the inner dict; setdefault
        # covers both cases in one statement.
        data = {}
        for item in result:
            data.setdefault(item['database_name'], {})[item['date']] = item['capacity']
        # Databases become rows, dates become columns.
        df = pd.DataFrame(data).T
        df.reset_index(drop=False, inplace=True)
        df.rename(columns={"index": "database_name"}, inplace=True)
        split = df.to_dict(orient='split')
        return split['columns'], split['data']
    except Exception as e:
        logger.error(f"capacity_report_handler => {e}")
        return [], []


def hive_capacity_table_report_handler(start_time: str, end_time: str, db: Session):
    """Pivot per-table capacity rows into report header + rows.

    :param start_time: inclusive range start.
    :param end_time: inclusive range end.
    :param db: SQLAlchemy session.
    :return: (title, content) where title is ['table_name', <date>, ...];
             ([], []) when there is no data or on any error.
    """
    try:
        # NOTE(review): get_db_table_by_date_range is called elsewhere with a
        # leading table_name argument (see latest_db_table_data), but here it
        # receives only the date range and session — this looks like a missing
        # argument or the wrong query method; confirm against the model.
        result = HiveTableInfo.get_db_table_by_date_range(start_time, end_time, db)
        if not result:
            return [], []
        # table_name -> {date: capacity}; both original if/else branches
        # performed the same update, setdefault replaces the duplication.
        data = {}
        for item in result:
            data.setdefault(item['table_name'], {})[item['date']] = item['capacity']
        # Tables become rows, dates become columns.
        df = pd.DataFrame(data).T
        df.reset_index(drop=False, inplace=True)
        df.rename(columns={"index": "table_name"}, inplace=True)
        split = df.to_dict(orient='split')
        return split['columns'], split['data']
    except Exception as e:
        # Log under this function's own name (was copy-pasted from the
        # database-level report handler, which made failures ambiguous).
        logger.error(f"hive_capacity_table_report_handler => {e}")
        return [], []


if __name__ == '__main__':
    # Ad-hoc manual test scaffolding: everything below is kept commented out;
    # only the pprint import executes when the module is run directly.
    from pprint import pprint

    # hive_handler = HiveHandler()
    # pprint(hive_handler.fs.listdir("/user/hive/warehouse/anti_tmp.db"))
    # pprint(hive_handler.fs.get_content_summary("/user/hive/warehouse/x1"))
    # hive_handler.get_tables_info()
    # from pprint import pprint
    # from common.database import SessionLocal
    # from app.bigdata.tasks import bigdata_hive_export_data_async
    # import time

    # session = SessionLocal()

    # start = time.time()
    # # Put the code block to time in the middle here
    # bigdata_hive_export_data_async()
    # end = time.time()
    # print('Program run time: %s Seconds' % (end - start))
    # pprint(hive_capacity_table_report_handler('2023-02-01', '2023-02-07', session))

#     def get_hive_query(self, from_time, to_time):
#
#         api_instance = cm_client.YarnApplicationsResourceApi(cm_client.ApiClient(api_url))
#         hive_query_response = api_instance.get_yarn_applications(cluster_name, service_name, filter=filter,
#                                                                    _from=datetime.strptime(time_item.get("start"),
#                                                                                            "%Y-%m-%d %H:%M:%S.%f"),
#                                                                    limit=limit, offset=offset,
#                                                                    to=datetime.strptime(time_item.get("to"),
#                                                                                         "%Y-%m-%d %H:%M:%S.%f"))
#
#         # query = "select total_kudu_on_disk_size_across_kudu_replicas where category=KUDU_TABLE "
#         result = {}
#         api_instance = cm_client.TimeSeriesResourceApi(self.api_client)
#         data = api_instance.query_time_series(_from=from_time, query=query, to=to_time)
#         if not data:
#             logger.error(f"get cm kudu table is null")
#             return False
#         for item in data.items[0].time_series:
#             try:
#                 result.update(
#                     {item.metadata.attributes["kuduTableId"]: f"{item.data[0].value / 1024 / 1024 / 1024:.2f}"})
#             except Exception as e:
#                 logger.error(f"get kudu table disk_size: {item} error =>{e}")
#                 return {}
#
#         return result
#
#
# cm_client.configuration.username = 'cm_api'
# cm_client.configuration.password = '<REDACTED>'  # SECURITY: real credential removed from source; load from env var / secret store instead
#
# api_host = 'http://192.168.63.181'
# port = '7180'
# api_version = 'v17'
#
# ## Cluster Name
# cluster_name = "Cluster 1"
# api_url = api_host + ':' + port + '/api/' + api_version
# print(api_url)
# ## Connect to CM
# print("\nConnecting to Cloudera Manager at " + api_host + ":" + port)
#
# ## Get the IMPALA service
# api_client = cm_client.ApiClient(api_url)
# services_api_instance = cm_client.ServicesResourceApi(api_client)
# services = services_api_instance.read_services(cluster_name, view='FULL')
# for service in services.items:
#     # print(service.name, "-", service.type)
#     if service.type == "IMPALA":
#         impala_service = service
#         print("Located Impala Service: " + service.name)
#         break
# if impala_service is None:
#     print("Error: Could not locate Impala Service")
#     quit(1)
#
# api_instance = cm_client.ImpalaQueriesResourceApi(cm_client.ApiClient(api_url))
#
# now = datetime.utcnow()
# start = now - timedelta(days=1)
# print(type(now))
# service_name = 'impala'
# filter = 'statement RLIKE ".*welab_event.*" AND query_type !=QUERY'
# _from = start
# limit = 1000
# offset = 0
# to = now
#
# time_items = [
#     {'start': '2022-10-24 00:00:00.000000', 'to': '2022-10-24 23:59:59.000000'},
#     {'start': '2022-10-25 00:00:00.000000', 'to': '2022-10-25 23:59:59.000000'},
#     {'start': '2022-10-26 00:00:00.000000', 'to': '2022-10-26 23:59:59.000000'},
#     {'start': '2022-10-27 00:00:00.000000', 'to': '2022-10-27 23:59:59.000000'},
#     {'start': '2022-10-28 00:00:00.000000', 'to': '2022-10-28 23:59:59.000000'},
#     {'start': '2022-10-29 00:00:00.000000', 'to': '2022-10-29 23:59:59.000000'},
#     {'start': '2022-10-30 00:00:00.000000', 'to': '2022-10-30 23:59:59.000000'},
#     {'start': '2022-10-31 00:00:00.000000', 'to': '2022-10-31 23:59:59.000000'},
#     {'start': '2022-11-01 00:00:00.000000', 'to': '2022-11-01 23:59:59.000000'},
#     {'start': '2022-11-02 00:00:00.000000', 'to': '2022-11-02 23:59:59.000000'},
#     {'start': '2022-11-03 00:00:00.000000', 'to': '2022-11-03 23:59:59.000000'},
#     {'start': '2022-11-04 00:00:00.000000', 'to': '2022-11-04 23:59:59.000000'},
#     {'start': '2022-11-05 00:00:00.000000', 'to': '2022-11-05 23:59:59.000000'},
#     {'start': '2022-11-06 00:00:00.000000', 'to': '2022-11-06 23:59:59.000000'},
#     {'start': '2022-11-07 00:00:00.000000', 'to': '2022-11-07 23:59:59.000000'},
#     {'start': '2022-11-08 00:00:00.000000', 'to': '2022-11-08 23:59:59.000000'},
#     {'start': '2022-11-09 00:00:00.000000', 'to': '2022-11-09 23:59:59.000000'},
#     {'start': '2022-11-10 00:00:00.000000', 'to': '2022-11-10 23:59:59.000000'},
#     {'start': '2022-11-11 00:00:00.000000', 'to': '2022-11-11 23:59:59.000000'},
#     {'start': '2022-11-12 00:00:00.000000', 'to': '2022-11-12 23:59:59.000000'},
#     {'start': '2022-11-13 00:00:00.000000', 'to': '2022-11-13 23:59:59.000000'},
#     {'start': '2022-11-14 00:00:00.000000', 'to': '2022-11-14 23:59:59.000000'},
#     {'start': '2022-11-15 00:00:00.000000', 'to': '2022-11-15 23:59:59.000000'},
#     {'start': '2022-11-16 00:00:00.000000', 'to': '2022-11-16 23:59:59.000000'},
#     {'start': '2022-11-17 00:00:00.000000', 'to': '2022-11-17 23:59:59.000000'},
#     {'start': '2022-11-18 00:00:00.000000', 'to': '2022-11-18 23:59:59.000000'},
#     {'start': '2022-11-19 00:00:00.000000', 'to': '2022-11-19 23:59:59.000000'},
#     {'start': '2022-11-20 00:00:00.000000', 'to': '2022-11-20 23:59:59.000000'},
#     {'start': '2022-11-21 00:00:00.000000', 'to': '2022-11-21 23:59:59.000000'},
#     {'start': '2022-11-22 00:00:00.000000', 'to': '2022-11-22 23:59:59.000000'},
#     {'start': '2022-11-23 00:00:00.000000', 'to': '2022-11-23 23:59:59.000000'},
#     {'start': '2022-11-24 00:00:00.000000', 'to': '2022-11-24 23:59:59.000000'},
# ]
#
# for time_item in time_items:
#     impala_query_response = api_instance.get_impala_queries(cluster_name, service_name, filter=filter,
#                                                             _from=datetime.strptime(time_item.get("start"),
#                                                                                     "%Y-%m-%d %H:%M:%S.%f"),
#                                                             limit=limit, offset=offset,
#                                                             to=datetime.strptime(time_item.get("to"),
#                                                                                  "%Y-%m-%d %H:%M:%S.%f"))
#
#     if queries := impala_query_response.queries:
#         print(len(queries))
#
#         for item in queries:
#             excel_writer = pd.ExcelWriter(f"test.xlsx")
#             df = pd.DataFrame([{'user': item.user, 'statement': item.statement, 'start_time': item.start_time,
#                                 'end_time': item.end_time}])
#             # mode = 'a'为追加数据，index为每行的索引序号，header为标题
#             df.to_csv('supplier_data.csv', mode='a', index=False, header=False)

# s2 = pd.DataFrame(np.array([['s2', 's2', 's2', 's2']]), columns=['a', 'b', 'c', 'd'])
