import pyarrow as pa
import pyarrow.parquet as pq
import pandas as pd
import numpy as np
import oss2
import time
import os
import traceback
import logging
from io import StringIO, BytesIO
from sqlalchemy import create_engine
import sqlparse
from sqlparse.sql import IdentifierList, Identifier, Where
from sqlparse.tokens import Keyword, DML
from elasticsearch import Elasticsearch, helpers
from omegaconf import OmegaConf
from pyhive import hive
from collections import OrderedDict
from storage_utils.table_schema import TABLE_SCHEMA_MAP

logger = logging.getLogger(__name__)

# SQL JOIN keywords recognized when extracting table names from a query
# (used by OSS_Handler.extract_tables to find tables referenced after a JOIN).
ALL_JOIN_TYPE = (
'JOIN', 'LEFT JOIN', 'RIGHT JOIN', 'INNER JOIN', 'FULL JOIN', 'LEFT OUTER JOIN', 'FULL OUTER JOIN')

class StorageAbstractLayer():
    """Unified facade over the concrete storage backends.

    platform_make == 'es'   -> off-cloud environment: read/write Elasticsearch
    platform_make == 'hive' -> read/write Hive tables
    platform_make == 'oss'  -> read/write CSV files stored in OSS

    Expected mapping-style config layout::

        {'storage_utils': {
            'platform_make': 'es',
            'oss':  {'access_id': '', 'access_secret': '', 'endpoint': '',
                     'bucket_name': '', 'scheme': ''},
            'es':   {'user': '', 'password': '', 'hosts': ''},
            'odps': {},
        }}
    """

    def __init__(self, config=None):
        """Build the per-backend handlers.

        :param config: one of
            - None: load the YAML config file (``STORAGE_CONFIG_PATH`` env var,
              falling back to the ZSHIELD_HOME / local default paths),
            - a plain dict in the layout shown on the class docstring,
            - a legacy OmegaConf object (attribute access, es settings under
              ``config.connect``).
        :raises ValueError: when the resolved platform is not es/oss/hive.
        """
        storage_config_path = os.environ.get('STORAGE_CONFIG_PATH')
        if (storage_config_path and os.path.isfile(storage_config_path)) or config is None:
            zshield_home = os.environ.get('ZSHIELD_HOME', '/home/zshield')
            config_path = storage_config_path or f'{zshield_home}/bin/ruleng/storage_utils/storage_utils.yaml'
            if not os.path.exists(config_path):
                # last-resort relative path for running from the source tree
                config_path = 'storage_utils/storage_utils.yaml'
            self._build_handlers(OmegaConf.load(config_path))
        elif isinstance(config, dict):
            self._build_handlers(config)
        else:
            # Legacy OmegaConf layout: attribute access; es config lives
            # under config.connect rather than config.storage_utils.
            self.platform_make = config.storage_utils.platform_make
            odps_cfg = config.storage_utils.odps
            oss_config = config.storage_utils.oss
            scheme = oss_config.scheme if oss_config.scheme else 'http'
            hive_config = config.storage_utils.hive
            es_cfg = config.connect
            self.oss_obj = OSS_Handler(oss_config.access_id, oss_config.access_secret,
                                       oss_config.endpoint, oss_config.bucket_name, scheme)
            self.odps_obj = ODPS_Handler(odps_cfg)
            self.es_obj = ES_Handler(es_cfg.es.user, es_cfg.es.password,
                                     es_cfg.es.hosts, es_cfg.es.port)
            self.hive_obj = HIVE_Handler(hive_config.host, hive_config.port,
                                         hive_config.username, hive_config.password,
                                         hive_config.auth)

        # Numeric platform codes (1/2/3) map to their string names; any other
        # value is normalized to a string and rejected below.
        if self.platform_make:
            try:
                code = int(self.platform_make)
            except (TypeError, ValueError):
                # Not numeric -- assume it is already a platform name.
                # (Was a bare ``except:`` which also swallowed KeyboardInterrupt.)
                self.platform_make = str(self.platform_make)
            else:
                if code == 1:
                    self.platform_make = 'es'
                elif code == 2:
                    self.platform_make = 'oss'
                elif code == 3:
                    self.platform_make = 'hive'

        if self.platform_make not in ['es', 'oss', 'hive']:
            raise ValueError(f'不支持的平台: {self.platform_make}')

    def _build_handlers(self, config):
        """Instantiate handlers from a mapping-style config.

        Shared by the YAML-file and plain-dict code paths, which were
        previously two near-identical copies of the same block.
        """
        storage_cfg = config.get('storage_utils')
        self.platform_make = storage_cfg.get('platform_make')
        oss_config = storage_cfg.get('oss')
        odps_cfg = storage_cfg.get('odps')
        es_cfg = storage_cfg.get('es')
        hive_cfg = storage_cfg.get('hive')
        # Only build the handlers whose config section is present and non-empty.
        if oss_config:
            self.oss_obj = OSS_Handler(**oss_config)
        if odps_cfg:
            self.odps_obj = ODPS_Handler(odps_cfg)
        if es_cfg:
            self.es_obj = ES_Handler(es_cfg['user'], es_cfg['password'],
                                     es_cfg['hosts'], es_cfg.get('port'))
        if hive_cfg:
            self.hive_obj = HIVE_Handler(**hive_cfg)

    def Read(self, db, table, sql, flag):
        """Read data from the active backend; returns a pandas DataFrame."""
        logger.info(f'Read platform_make: {self.platform_make}')
        if self.platform_make == 'es':
            self.es_obj.init_es()
            result = self.es_obj.read(db, table, sql)
        elif self.platform_make == 'oss':
            self.oss_obj.init_oss()
            result = self.oss_obj.read(db, table, sql)
        elif self.platform_make == 'hive':
            self.hive_obj.init_hive()
            result = self.hive_obj.read(db, table, sql)
        else:
            raise ValueError(f'不支持的平台: {self.platform_make}')
        return result

    def Write(self, db, table, data, flag):
        """Write rows into the active backend."""
        logger.info(f'Write platform_make: {self.platform_make}')

        if self.platform_make == 'es':
            self.es_obj.init_es()
            result = self.es_obj.write(db, table, data)
        elif self.platform_make == 'oss':
            self.oss_obj.init_oss()
            result = self.oss_obj.write(db, table, data)
        elif self.platform_make == 'hive':
            self.hive_obj.init_hive()
            result = self.hive_obj.write(db, table, data)
        else:
            # fix: the message used to blame 'flag' although platform_make
            # is what is actually checked here
            raise ValueError(f'不支持的平台: {self.platform_make}')
        return result

    def Delete(self, db, table, sql, flag=None):
        """Delete the data matched by *sql* (currently a no-op for hive)."""
        logger.info(f'Delete platform_make: {self.platform_make}')
        if self.platform_make == 'es':
            self.es_obj.init_es()
            self.es_obj.delete(db, table, sql)
            logger.info(f'删除索引: {table}')
        elif self.platform_make == 'oss':
            self.oss_obj.init_oss()
            self.oss_obj.delete(db, table, sql)
            logger.info(f'删除oss文件: {table}')
        elif self.platform_make == 'hive':
            # Deletion is not implemented for the hive backend.
            pass
        else:
            # fix: consistent wording with Read/Write (was '不支持的flag')
            raise ValueError(f'不支持的平台: {self.platform_make}')

    def Update(self):
        """Data update -- not implemented yet."""


class ODPS_Handler():
    """ODPS/MaxCompute reader backed by a lazily created SparkSession."""

    def __init__(self, odps_config=None):
        # odps_config is currently unused: Spark picks its settings up from
        # the cluster environment when the session is created.
        self.spark = None

    def init_odps(self):
        """Create the SparkSession on first use (idempotent)."""
        if self.spark is None:
            from pyspark.sql import SparkSession
            self.spark = SparkSession.builder \
                .appName("spark sql") \
                .config("spark.sql.broadcastTimeout", 20 * 60) \
                .config("spark.hadoop.fs.oss.cname.enable", False) \
                .config("spark.sql.crossJoin.enabled", True) \
                .config("odps.exec.dynamic.partition.mode", "nonstrict") \
                .getOrCreate()

    def read(self, db, table, sql):
        """Run *sql* through Spark and return the result as a pandas DataFrame.

        db/table are accepted for interface parity with the other handlers
        but are not used -- the query is self-contained.
        """
        # fix: previously crashed with AttributeError on self.spark being
        # None when init_odps() had not been called first (the facade never
        # calls it); initialize lazily like the other handlers.
        self.init_odps()
        spark_df = self.spark.sql(sql)
        return spark_df.toPandas()


class OSS_Handler():
    """CSV-on-OSS backend.

    Tables are stored as ``<bucket>/<db>/<table>.csv``.  SQL is evaluated by
    loading the CSV into an in-memory SQLite database (via SQLAlchemy),
    running the statement there, and -- for mutating operations -- uploading
    the resulting table back to OSS.
    """

    def __init__(self, access_id, access_secret, endpoint, bucket_name, scheme='http'):
        self.access_id = access_id
        self.access_secret = access_secret
        self.endpoint = endpoint
        self.bucket_name = bucket_name
        self.scheme = scheme

        # Both created lazily by init_oss().
        self.oss_cli = None
        self.engine = None

    def init_oss(self):
        """Create the OSS client and the in-memory SQLite engine (idempotent)."""
        if self.oss_cli is None:
            self.engine = create_engine('sqlite://', echo=False)
            endpoint_url = f'{self.scheme}://{self.endpoint}'
            self.oss_cli = oss2.Bucket(oss2.Auth(self.access_id, self.access_secret), endpoint_url,
                                       self.bucket_name)

    def read_oss_csv(self, db, table):
        """Download ``<db>/<table>.csv`` and return it as a DataFrame.

        Returns None when the object does not exist.
        """
        index = os.path.join(db, f'{table}.csv').replace('\\', '/')
        if not self.oss_cli.object_exists(index):
            return
        result_obj = self.oss_cli.get_object(index)

        df = pd.read_csv(StringIO(result_obj.read().decode("utf-8")))
        return df

    def drop_temp_table(self, tables):
        """Drop the temporary SQLite tables created for a query."""
        for table_name in tables:
            drop_table_sql = f"drop table if exists {table_name}"
            self.engine.execute(drop_table_sql)

    def read(self, db, table, sql):
        """Evaluate a SELECT against the CSV-backed table.

        :param db: OSS "directory" component of the object path
        :param table: file name, read as ``<db>/<table>.csv``
        :param sql: e.g. 'select * from {index} limit 10'
        :return: pandas DataFrame (empty when the CSV is missing)
        """
        print(db, table, sql)
        tables = self.extract_tables(sql)
        for table_name in tables:
            # NOTE(review): this always loads the CSV named by *table*, even
            # when the SQL references several tables (table_name is only used
            # as the SQLite table name) -- confirm multi-table queries.
            df = self.read_oss_csv(db, table)
            print(df)
            if df is None:
                return pd.DataFrame()
            df.to_sql(table_name, con=self.engine, index=False)
        read_result = self.engine.execute(sql)
        # fix: this row-marshalling loop was a duplicate of read_engion_result
        result_df = self.read_engion_result(read_result)
        self.drop_temp_table(tables)
        return result_df

    def read_engion_result(self, read_result):
        """Convert a SQLAlchemy result proxy into a pandas DataFrame."""
        merged, rows = {}, []
        for rowproxy in read_result:
            for column, value in rowproxy.items():
                # Accumulate column -> value for this row.
                merged = {**merged, **{column: value}}
            rows.append(merged)
        return pd.DataFrame(rows)

    def write(self, db, table, data):
        """Upsert *data* into ``<db>/<table>.csv``.

        Rows are coerced per the table schema; when the CSV already exists
        and the schema has an ``_id`` column, existing rows with the same
        ``_id`` are updated in place, others are appended.  The merged table
        is uploaded back to OSS.
        """
        df = self.read_oss_csv(db, table)
        schema = TABLE_SCHEMA_MAP.get(table)
        if df is None:
            insert_data = []
            print('oss 中没有找到原始文件, 直接写入数据')
            for data_info in data:
                data_list = []
                for column, info, col_type in zip(schema.names, data_info, schema.types):
                    if col_type == 'binary':
                        info = info.decode('utf-8')
                    if col_type == 'string':
                        info = str(info)
                    data_list.append(info)
                insert_data.append(tuple(data_list))
            remote_path = os.path.join(db, f'{table}.csv').replace('\\', '/')
            df = pd.DataFrame(insert_data, columns=schema.names)
            out_put_io = StringIO()
            # NOTE(review): line_terminator was renamed lineterminator in
            # pandas 2.0 -- confirm the pinned pandas version.
            df.to_csv(out_put_io, index=False, line_terminator='\r\n')
            csv_data = out_put_io.getvalue()
            self.oss_cli.put_object(remote_path, csv_data)
            print(f'上传 oss 文件至:{remote_path}')
        else:
            df.to_sql(table, con=self.engine, index=False)
            insert_data = []
            update_data = []

            # Split incoming rows into updates (existing _id) and inserts.
            for data_info in data:
                data_list = []
                data_id = None
                for column, info, col_type in zip(schema.names, data_info, schema.types):
                    if col_type == 'binary':
                        info = info.decode('utf-8')
                    if col_type == 'string':
                        info = str(info)
                    if column == '_id':
                        data_id = info
                    data_list.append(info)
                if '_id' in schema.names:
                    exits_df = self.read_engion_result(self.engine.execute(f"select * from {table} where _id='{data_id}'"))
                    if not exits_df.empty:
                        update_data.append(tuple(data_list))
                    else:
                        insert_data.append(tuple(data_list))
                else:
                    insert_data.append(tuple(data_list))

            # NOTE(review): SQL below is built by string interpolation --
            # injection-prone if the data is untrusted; parameterize if so.
            for d in update_data:
                data_id = None
                data_li = []
                for column, info in zip(schema.names, d):
                    if column == '_id':
                        data_id = info
                    data_li.append(f"{column}='{info}'")
                column_str = ','.join(data_li)

                update_sql = f"update {table} set {column_str} where _id='{data_id}'"
                self.engine.execute(update_sql)

            if insert_data:
                values = ','.join([repr(tup) for tup in insert_data])
                insert_sql = f"insert into {table} VALUES {values}"

                self.engine.execute(insert_sql)

            # Re-read the merged table and push it back to OSS.
            read_sql = f"select * from {table}"
            read_result = self.engine.execute(read_sql)
            # fix: this row-marshalling loop duplicated read_engion_result
            result_df = self.read_engion_result(read_result)
            out_put_io = StringIO()
            result_df.to_csv(out_put_io, index=False, line_terminator='\r\n')
            csv_data = out_put_io.getvalue()
            remote_path = os.path.join(db, f'{table}.csv').replace('\\', '/')
            self.oss_cli.put_object(remote_path, csv_data)
            self.drop_temp_table([table])
        print('write oss down')

    def insert(self, db, table, sql):
        """Run a mutating statement against the CSV-backed table and upload
        the result back to ``<db>/<table>.csv``."""
        df = self.read_oss_csv(db, table)
        if df is None:
            return pd.DataFrame()
        df.to_sql(table, con=self.engine, index=False)
        self.engine.execute(sql)
        read_sql = f"select * from {table}"
        result_df = self.read(db, table, read_sql)
        out_put_io = StringIO()
        result_df.to_csv(out_put_io, index=False, line_terminator='\r\n')
        csv_data = out_put_io.getvalue()
        remote_path = os.path.join(db, f'{table}.csv').replace('\\', '/')
        self.oss_cli.put_object(remote_path, csv_data)
        self.drop_temp_table([table])

    def delete(self, db, table, sql):
        """DELETE statements go through the same load-execute-upload path."""
        self.insert(db, table, sql)

    def update(self, db, table, sql):
        """UPDATE statements go through the same load-execute-upload path."""
        self.insert(db, table, sql)

    @staticmethod
    def extract_tables(sql):
        """Extract the table names referenced by a SELECT statement
        (FROM clause and all JOIN clauses)."""

        def is_subselect(parsed):
            """Return True when the token group is a sub-SELECT.

            :param parsed: T.Token
            """
            if not parsed.is_group:
                return False
            for item in parsed.tokens:
                if item.ttype is DML and item.value.upper() == 'SELECT':
                    return True
            return False

        def extract_from_part(parsed):
            """Yield the tokens that follow a FROM keyword."""
            from_seen = False
            for item in parsed.tokens:
                if from_seen:
                    if is_subselect(item):
                        for x in extract_from_part(item):
                            yield x
                    elif item.ttype is Keyword:
                        from_seen = False
                        continue
                    else:
                        yield item
                elif item.ttype is Keyword and item.value.upper() == 'FROM':
                    from_seen = True

        def extract_join_part(parsed):
            """Yield the tokens that follow any JOIN keyword."""
            flag = False
            for item in parsed.tokens:
                if flag:
                    if item.ttype is Keyword:
                        flag = False
                        continue
                    else:
                        yield item
                if item.ttype is Keyword and item.value.upper() in ALL_JOIN_TYPE:
                    flag = True

        def extract_table_identifiers(token_stream):
            """Map identifier tokens to bare table names (aliases stripped)."""
            for item in token_stream:
                if isinstance(item, IdentifierList):
                    for identifier in item.get_identifiers():
                        yield identifier.get_real_name()
                elif isinstance(item, Identifier):
                    if 'AS' in item.value.upper():
                        yield item.value.split('AS')[0].strip()
                    else:
                        yield item.get_real_name()
                elif item.ttype is Keyword:
                    yield item.value

        from_stream = extract_from_part(sqlparse.parse(sql)[0])
        join_stream = extract_join_part(sqlparse.parse(sql)[0])
        return list(extract_table_identifiers(from_stream)) + list(
            extract_table_identifiers(join_stream))


class ES_Handler:
    """Elasticsearch backend; queries run through the OpenDistro SQL plugin."""

    def __init__(self, user, password, hosts, port=None):
        """Store connection settings; the client is created by init_es()."""
        self.user = user
        self.password = password
        self.hosts = hosts
        self.port = port
        self.es_cli = None
        self.engine = None

    def init_es(self):
        """Create the ES client on first call (idempotent)."""
        if self.es_cli is None:
            if self.port is not None:
                # Hosts were configured without ports: append the shared one.
                self.hosts = [host + ":" + str(self.port) for host in self.hosts]

            self.es_cli = Elasticsearch(self.hosts, http_auth=f"{self.user}:{self.password}")

    def read(self, db, table, sql):
        """Run *sql* via the _opendistro SQL endpoint; return a DataFrame."""
        # fix: the check was case-sensitive, so a query containing 'LIMIT'
        # had a second, conflicting 'limit 10000' appended
        if 'limit' not in sql.lower():
            sql = sql.strip(';') + ' limit 10000'
        body = {"query": sql}
        result = self.es_cli.transport.perform_request(
            "POST", "/_opendistro/_sql/", body=body
        )
        records = [
            {col['name']: value for col, value in zip(result['schema'], row)}
            for row in result['datarows']
        ]
        return pd.DataFrame(records)

    def make_template(self, index):
        """Build an index-template body from the table schema.

        string -> keyword, int/int32/int64 -> integer, everything else is
        passed through; the '_id' pseudo-column is skipped.
        """
        schema = TABLE_SCHEMA_MAP.get(index)
        template_body = {
            "index_patterns": [f"{index}"],
            "mappings": {"properties": {}}
        }

        for field in schema:
            if field.name == '_id':
                continue
            if field.type == 'string':
                field_type = "keyword"
            elif field.type in ['int', 'int32', 'int64']:
                field_type = "integer"
            else:
                field_type = str(field.type)
            template_body['mappings']['properties'][f'{field.name}'] = {
                    "type": field_type
            }

        return template_body

    def write(self, db, table, data):
        """Bulk-index *data* into *table*, registering a template for new
        indices; rows with an '_id' column use it as the document id."""
        schema = TABLE_SCHEMA_MAP.get(table)

        try:
            # Make sure index exists; best-effort, failures are only logged.
            if not self.es_cli.indices.exists(index=table):
                template = self.make_template(table)
                self.es_cli.indices.put_template(name=table, body=template)
        except Exception:
            # fix: was a bare except, which also swallowed KeyboardInterrupt
            traceback.print_exc()

        actions = []
        for d in data:
            source = {}
            for column, info, col_type in zip(schema.names, d, schema.types):
                if col_type == 'binary':
                    info = info.decode('utf-8')
                source[column] = info
            action = {
                '_op_type': 'index',
                '_index': table,

            }
            if '_id' in source:
                # _id is metadata, not a document field
                id_info = source.pop('_id')
                action['_id'] = id_info
            action.update({'_source': source})
            actions.append(action)

        helpers.bulk(client=self.es_cli, actions=actions)

        # Flush so the documents are immediately searchable.
        self.es_cli.indices.flush(table)

    def delete(self, db, table, sql):
        """Run a SQL DELETE through the _opendistro endpoint and log the
        reported counts."""
        body = {"query": sql}
        result = self.es_cli.transport.perform_request(
            "POST", "/_opendistro/_sql/", body=body
        )
        for data_list in result['datarows']:
            for k, v in zip(result['schema'], data_list):
                logger.info(f'成功删除数据 {v} 条')

class HIVE_Handler:
    def __init__(self, host, port=10000, username=None, password=None, auth=None):
        """Hive operations interface (via pyhive).

        fix: the docstring previously said "es 操作接口" (copy-paste error).
        """
        # SECURITY fix: never log the password.
        logger.info(f'hive_init_config: host={host}, port={port}, '
                    f'username={username}, auth={auth}')
        self.host = host
        self.port = port
        # Empty strings from the config are normalized to None for pyhive.
        self.username = username if username else None
        self.password = password if password else None
        self.auth = auth if auth else None
        self.hive_cli = None

    def init_hive(self):
        """Open the Hive connection/cursor on first call (idempotent).

        fix: previously every call opened a brand-new connection (every
        Read/Write leaked one); now guarded with a None check like the
        other handlers' init_*() methods.
        """
        if self.hive_cli is None:
            conn = hive.Connection(host=self.host, port=self.port,
                                   username=self.username, password=self.password,
                                   auth=self.auth)
            self.hive_cli = conn.cursor()

    def read(self, db, table, sql):
        """Run *sql* (optionally after ``USE db``) and return a DataFrame.

        Column labels come from the cursor description with any 'table.'
        prefix stripped.
        """
        if db:
            self.hive_cli.execute(f"USE `{db}`")
        self.hive_cli.execute(sql)
        columns = [col[0].split('.')[-1] for col in self.hive_cli.description]
        data = self.hive_cli.fetchall()
        return pd.DataFrame(list(data), columns=columns)

    def create_table(self, db, table, schema):
        """CREATE TABLE IF NOT EXISTS ``<db>.<table>`` from the schema."""
        field_li = []
        for column in schema:
            f_name = column.name
            f_type = column.type
            if column.type == 'int32':
                # Hive has no int32 type name.
                f_type = 'int'
            if column.name in ['_id', 'timestamp']:
                # Reserved/special identifiers must be backtick-quoted.
                f_name = f'`{f_name}`'
            field_li.append(f'{f_name} {f_type}')
        ddl_field = ', '.join(field_li)
        sql = f"""CREATE TABLE IF NOT EXISTS {db}.`{table}` ({ddl_field})"""
        print('create_table_sql:', sql)
        self.hive_cli.execute(sql)

    def parse_data(self, data, schema):
        """Coerce raw rows per schema: decode 'binary' fields to UTF-8 text,
        stringify 'string' fields, pass everything else through.

        :return: list of row lists, one per input row.
        """
        result = []
        for data_info in data:
            data_list = []
            for column, info, col_type in zip(schema.names, data_info, schema.types):
                if col_type == 'binary':
                    info = info.decode('utf-8')
                if col_type == 'string':
                    info = str(info)
                data_list.append(info)
            result.append(data_list)
        return result

    def write(self, db, table, data):
        """Append *data* to the table, de-duplicating on '_id' (last row
        wins), then rewrite the whole table with INSERT OVERWRITE."""
        schema = TABLE_SCHEMA_MAP.get(table)
        self.create_table(db, table, schema)
        new_data_li = self.parse_data(data, schema)
        source_sql = f"""select * from {db}.`{table}`"""
        self.hive_cli.execute(source_sql)
        source_data_li = self.hive_cli.fetchall()
        print(source_data_li)
        source_data_li.extend(new_data_li)
        if '_id' in schema.names:
            # De-duplicate on _id, keeping the most recent occurrence.
            result_dic = OrderedDict()
            for data_info in source_data_li:
                record = dict(zip(schema.names, data_info))
                result_dic[record['_id']] = data_info
            source_data_li = result_dic.values()
        # NOTE(review): values are interpolated via repr() -- SQL-injection
        # prone if *data* is ever untrusted; parameterize in that case.
        values = ','.join([repr(tuple(tup)) for tup in source_data_li])
        insert_sql = f"""insert overwrite table {db}.`{table}` VALUES {values}"""
        print('insert_sql:', insert_sql)
        self.hive_cli.execute(insert_sql)


if __name__ == '__main__':
    # Demo / smoke-test driver for the storage abstraction layer.
    #
    # SECURITY fix: the previous revision hard-coded live OSS and ES
    # credentials in this file; they now come from environment variables.
    config_obj = {'storage_utils': {
        'platform_make': 'hive',
        'oss': {
            'access_id': os.environ.get('OSS_ACCESS_ID', ''),
            'access_secret': os.environ.get('OSS_ACCESS_SECRET', ''),
            'endpoint': 'oss-cn-beijing-internal.aliyuncs.com',
            'bucket_name': 'test-oss-binary',
            'scheme': 'https'
        },
        'es': {
            'user': os.environ.get('ES_USER', ''),
            'password': os.environ.get('ES_PASSWORD', ''),
            'hosts': ["192.168.81.192:9200", "192.168.81.193:9200"],
            # 'port': 9200
        },
        'odps': {},
        'hive': {
            'host': '192.168.82.74',
            'port': 10000
        }
    }}
    # sal_obj = StorageAbstractLayer(config=config_obj)
    sal_obj = StorageAbstractLayer()

    table = 'configuration_file'
    data = [
        (11, 'task_scheduler', 'task_scheduler.yaml', '-V-3', b'gAF1ZXVzLg==', '2', '2', '2', 1689235278.341067, '2023-07-13 16:01:19'),
        (12, 'task_scheduler', 'task_scheduler.yaml', '-V-3', b'gAF1ZXVzLg==', '1', '2', '2', 1689235278.341067, '2023-07-13 16:01:19'),
        (12, 'pp2', 'task_scheduler.yaml', '-V-3', b'gAF1ZXVzLg==', '1', '1', '2', 1689235278.341067, '2023-07-13 16:01:19')
    ]
    # ES usage (platform_make = 1): string fields must be quoted in the
    # query or the SQL plugin raises an error.
    read_sql = f"select * from {table}"
    # delete_sql = "DELETE from trans where STATUS='1'"
    # sal_obj.Write(None, table, data, None)
    # res = sal_obj.Read(None, None, read_sql, None)
    # sal_obj.Delete(None, table, delete_sql, None)

    # OSS usage (platform_make != 1, flag = 2): objects are laid out as
    # <bucket>/<db>/<table>.csv
    # db = 'test'
    # table = 'binary'
    # sal_obj.Read(db, table, read_sql, 2)
    # sal_obj.Write(db, table, data, 2)
    # sal_obj.Delete(db, table, delete_sql, 2)

    # ODPS usage (platform_make != 1, flag = 1):
    # sal_obj.Read(None, None, "select * from szyy_power_json_es_org_no limit 11111", 1)

    # Hive usage
    sal_obj.hive_obj.init_hive()

    # fix: two adjacent string literals previously concatenated into
    # "... limit where ..." which is invalid HiveQL; the stray "limit"
    # has been removed.
    read_sql = ('select * from configuration_file'
                ' where TIMESTAMP>=1690794323.8694088 and TIMESTAMP< 1690794443.8694088')
    db = 'test'
    result = sal_obj.Read(db, table, read_sql, None)
    print(result)
    # sal_obj.hive_obj.write(db, table, data)