import re

from mysql_tool import extract_all_contents
from pyspark.sql import SparkSession

class Transform:
    """Rewrites TiDB/MySQL SQL text into Spark-SQL compatible statements.

    Each ``process_key_<rule_name>`` method implements one rewrite rule;
    :meth:`messages` dispatches to the method selected by ``rule_name``.
    """

    def __init__(self, sql=None, rule_name=None, db_name_config=None):
        # sql: raw SQL text to transform (possibly several ';'-separated statements)
        self.sql = sql
        # db_name_config: mapping of logical db names and filter values, e.g. keys
        # 'wotu_std', 'wotu_mid', 'wotu_cdm', 'di_dim', 'wotu_dim',
        # 'hospital_code_list', 'cancer_species' -- TODO confirm full schema with callers
        self.db_name_config = db_name_config
        # rule_name: suffix selecting which process_key_<rule_name> method to run
        self.rule_name = rule_name

    def process_key_c_1(self):
        """Convert TiDB ``insert into`` statements into Spark SQL inserts.

        Steps (translated from the original spec):
          * split the sql on ';'
          * drop everything after 'on duplicate key'
          * drop delete/truncate/set statements
          * extract db name, table name and the select part of each insert
            (all statements are expected to be ``insert into``)
          * substitute DaYu ("大禹治水") scheduler variables
          * substitute logical db names (wotu_std/wotu_mid/wotu_cdm/di_dim/wotu_dim)
            with the physical names passed in via ``db_name_config``
          * add a dynamic ``hospital_code`` partition and a column mapping derived
            from the target table schema

        :return: the rewritten statements joined with ';' (also stored in ``self.sql``)
        """
        spark = SparkSession.builder.appName("genrate_tidb_2_sparksql"). \
            config('spark.sql.parquet.binaryAsString', 'true'). \
            config('spark.sql.parquet.enableVectorizedReader', 'false'). \
            enableHiveSupport(). \
            getOrCreate()
        spark.sparkContext.setLogLevel("ERROR")
        final_sql_list = []
        sql_list = self.sql.split(";")
        # the three pv/p14/allpv scheduler variables all expand to the same filter
        hospital_where = "hospital_code in ({})".format(self.db_name_config['hospital_code_list'])
        ycdtpvWhere = hospital_where
        ycdtp14Where = hospital_where
        ycdtallpvWhere = hospital_where
        filterb06_1 = "record_type not in ('转入记录','转出记录','阶段小结','72小时谈话记录')"

        for one_sql in sql_list:
            one_sql = re.sub("on duplicate key.*", '', one_sql)
            if re.findall("delete |truncate |set ", one_sql):
                one_sql = re.sub("delete .*|truncate .*|set .*", '', one_sql)
            if len(one_sql.strip()) < 5:
                continue
            table_name_pt = re.search(
                "(insert|replace).{0,3}into (.{0,30}[0-9a-z_]+) ?\\. ?([0-9a-z_]+) {0,5}\\([0-9a-z_, ]+\\)( {0,5}(select|with).*)",
                one_sql)
            if table_name_pt is None:
                # statement is not "insert into db.table (...) select ..."; skip it
                # instead of crashing on .group() (original raised AttributeError)
                continue
            # group(2) would be the source db name (unused)
            table_name = table_name_pt.group(3)   # target table name
            table_sql = table_name_pt.group(4)    # the select/with body
            # expand DaYu scheduler variables (raw strings: patterns are regexes)
            table_sql = re.sub(r'\$\{sys\!ycdtpv(W|w)here\}', ycdtpvWhere, table_sql)
            table_sql = re.sub(r'\$\{sys\!ycdtp([0-9]+)(W|w)here\}', ycdtp14Where, table_sql)
            table_sql = re.sub(r'\$\{sys\!filterb06_1\}', filterb06_1, table_sql)
            table_sql = re.sub(r'\$\{sys\!ycdtallpv(W|w)here\}', ycdtallpvWhere, table_sql)
            table_sql = re.sub(r'\$\{sys\!ycdtpastpv(W|w)here\}', ycdtp14Where, table_sql)
            table_sql = re.sub(r'\$\{sys\!cancer(S|s)pecies\}',
                               "{}".format(self.db_name_config['cancer_species']), table_sql)
            table_sql = re.sub(r'\$\{sys\!center(H|h)ospital(C|c)ode\}', "1=1", table_sql)
            # rewrite logical db names to the physical names supplied by the caller
            for logical_db in ('wotu_std', 'wotu_mid', 'wotu_cdm', 'di_dim', 'wotu_dim'):
                table_sql = re.sub(logical_db + r' ?\.',
                                   "{}.".format(self.db_name_config[logical_db]), table_sql)

            sql_prefix = "insert into table {}.{} partition(hospital_code)".format(
                self.db_name_config['wotu_std'], table_name)

            df = spark.sql(table_sql)
            df.createOrReplaceTempView('tmp_std')
            source_field_list = df.columns

            # read the target table schema to drive the column mapping
            data_field = spark.sql("select * from  {}.{} limit 1".format(self.db_name_config['wotu_std'], table_name))
            col_list = data_field.columns
            if "hospital_code" in col_list:
                col_list.remove("hospital_code")
            field_mapping_list = []
            for col in col_list:
                if col in source_field_list:
                    field_mapping_list.append(" cast ({} as string) as {}".format(col, col))
                elif col == "id":
                    # BUG FIX: was `col in ("id")` -- a *substring* test on the string
                    # "id" that also matched columns named "i" or "d"
                    field_mapping_list.append(" unix_timestamp()  as {}".format(col))
                elif col == "record_status":
                    # BUG FIX: same substring-test defect as above
                    field_mapping_list.append(" '1' as {}".format(col))
                elif col in ("create_time", "rel_biz_time"):
                    field_mapping_list.append(
                        " from_unixtime(unix_timestamp(),'yyyy-MM-dd HH:mm:ss') as {}".format(col))
                else:
                    # target column missing from the source select: fill with ''
                    field_mapping_list.append(" '' as {}".format(col))
            field_mapping_str = " ,".join(field_mapping_list)

            # BUG FIX: the original formatted `new_table_sql` with itself (undefined
            # on the first iteration); the inner query must be `table_sql`.
            new_table_sql = """  {} select {},hospital_code   
                    from (
                    {}
                    ) tmp_std """.format(sql_prefix, field_mapping_str, table_sql)
            final_sql_list.append(new_table_sql)

        self.sql = ';'.join(final_sql_list)
        # return the result for consistency with the other process_key_* rules
        return self.sql

    def time_unit(self, sql_fragment, function_name):
        """Convert one ``<date_expr>, INTERVAL (<expr>) <unit>`` fragment into a
        ``yy_date_add``/``yy_date_sub`` call.

        :param sql_fragment: single sql fragment, e.g. "current(), interval (1) month"
        :param function_name: 'date_add' or 'date_sub'
        :return: rewritten expression string, or None when the fragment contains
                 no INTERVAL keyword
        """
        match = re.search(r"INTERVAL", sql_fragment, re.IGNORECASE)
        if match is None:
            # no INTERVAL keyword: nothing to rewrite (original returned None here too)
            return None
        # everything before the INTERVAL keyword (keeps any trailing comma/space)
        date_str = sql_fragment[:match.start()]
        # locate the '(' that opens the quantity expression after INTERVAL
        left_bracket = 0
        for index in range(match.end(), len(sql_fragment)):
            if sql_fragment[index] == '(':
                left_bracket = index
                break
        # find the matching ')' by tracking bracket depth
        counter = 0
        right_bracket = 0
        for index in range(left_bracket, len(sql_fragment)):
            if sql_fragment[index] == '(':
                counter += 1
            if sql_fragment[index] == ')':
                counter -= 1
            if counter == 0:
                right_bracket = index
                break
        # the unit is the non-space text after the closing bracket, up to any ')'
        unit_chars = []
        for index in range(right_bracket + 1, len(sql_fragment)):
            if sql_fragment[index] == ')':
                break
            if sql_fragment[index] != ' ':
                unit_chars.append(sql_fragment[index])
        time_unit_str = ''.join(unit_chars)
        time_express = sql_fragment[left_bracket: right_bracket + 1]
        return f"yy_{function_name}(" + date_str + f"cast ({time_express} as int), '" + time_unit_str + "')"

    def replace_time_unit(self, function_name):
        """Replace every ``function_name(...)`` call in ``self.sql`` with its
        ``yy_``-prefixed Spark equivalent.

        :param function_name: 'date_add' or 'date_sub'
        """
        date_sqls = extract_all_contents(self.sql, rf"{function_name}\(")
        if date_sqls:
            for date_sql in date_sqls:
                new_date_sql = self.time_unit(date_sql, function_name)
                if new_date_sql is None:
                    # BUG FIX: fragment had no INTERVAL keyword -- the original
                    # passed None to re.sub and raised TypeError; leave it as-is
                    continue
                pattern = re.escape(f"{function_name}({date_sql})")
                self.sql = re.compile(pattern, re.IGNORECASE).sub(new_date_sql, self.sql)

    def process_key_c_4(self):
        """Rewrite date_add/date_sub INTERVAL expressions (see replace_time_unit)."""
        self.replace_time_unit('date_add')
        self.replace_time_unit('date_sub')
        return self.sql

    def process_key_c_7(self):
        """Rewrite MySQL ``GROUP_CONCAT(...)`` calls into Spark-compatible calls.

        ``separator``/``order by`` clauses become extra comma-separated arguments;
        ordered concatenations are routed to ``GROUP_CONCAT_AND_ORDER``.
        """
        regex_pattern = r'GROUP_CONCAT\('
        contents = extract_all_contents(self.sql, regex_pattern)
        if contents:
            for sql_part in contents:
                new_sql_part = re.compile(r"separator", re.IGNORECASE).sub(",", sql_part)
                if "ORDER BY" in sql_part.upper():
                    new_sql_part = re.compile(r"order by", re.IGNORECASE).sub(',', new_sql_part)
                    new_sql_part = re.compile(r"desc", re.IGNORECASE).sub(",'desc'", new_sql_part)
                    # BUG FIX: was "',asc'" -- misplaced quote produced broken SQL
                    new_sql_part = re.compile(r"asc", re.IGNORECASE).sub(",'asc'", new_sql_part)
                    old_call = "GROUP_CONCAT(" + sql_part + ")"
                    new_sql_part = "GROUP_CONCAT_AND_ORDER(" + new_sql_part + ")"
                else:
                    old_call = "GROUP_CONCAT(" + sql_part + ")"
                    new_sql_part = "GROUP_CONCAT(" + new_sql_part + ")"
                # BUG FIX: the original inserted literal backslashes ("GROUP_CONCAT\(")
                # and then re.escape()d them, so the pattern matched text that never
                # occurs in SQL and the substitution silently did nothing. Escape the
                # plain call text once, as process_key_c_8 does.
                self.sql = re.compile(re.escape(old_call), re.IGNORECASE).sub(new_sql_part, self.sql)
        return self.sql

    def process_key_c_8(self):
        """Rewrite ``JSON_UNQUOTE(JSON_EXTRACT(doc, path))`` into
        ``GET_JSON_OBJECT(doc, path)``."""
        regex_pattern = r'JSON_UNQUOTE\('
        contents = extract_all_contents(self.sql, regex_pattern)
        if contents:
            for sql_part in contents:
                # drop the json_extract wrapper, keeping its argument list
                inner = re.compile(r"json_extract", re.IGNORECASE).sub("", sql_part)
                inner = inner.strip()
                if inner.startswith("("):
                    inner = inner[1:]
                # drop the wrapper's trailing right bracket
                if inner.endswith(")"):
                    inner = inner[:-1]
                old_call = "JSON_UNQUOTE(" + sql_part + ")"
                new_call = "GET_JSON_OBJECT(" + inner + ")"
                self.sql = re.compile(re.escape(old_call), re.IGNORECASE).sub(new_call, self.sql)
        return self.sql

    def messages(self) -> str:
        """Dispatch to ``process_key_<rule_name>`` and return its result.

        :return: the transformed sql returned by the selected rule method,
                 or '' when no matching method exists.
        """
        message = ''
        method_name = f"process_key_{self.rule_name}"
        method = getattr(self, method_name, None)
        if callable(method):
            message = method()
        return message
### 测试
### Transform(sql="select group_concat(a separator ','),group_concat(a separator ',' order by a desc) from table1;",rule_name="111").process_key_c_7()

# str1 = Transform(sql="select JSON_UNQUOTE(json_extract(content,'$.name')) from table1;",rule_name="111").process_key_c_8()
# print(str1)

# str1 = Transform(sql="select datE_sub(a,interval (1+2+(3)) day) from table1;",rule_name="111").process_key_c_4()
# print(str1)
