import json
import os
from collections import defaultdict

import pandas as pd

from configs.db_connector import ProvincialPlatformConfigDev
from utils.db_config_utils import JdbcConfigUtils
from utils.json_utils import load_json


def is_not_ex_canteen_table(_table_name):
    """Return True unless the table is one of the canteen-exempt base tables
    (school / supplier basic info), which carry no canteen columns."""
    return _table_name not in (
        'sunshine_basic_school_basic_info',
        'sunshine_basic_supplier_basic_info',
    )


def split_data_frame(df, chunk_size):
    """Split *df* into consecutive row chunks of at most *chunk_size* rows each."""
    total_rows = df.shape[0]
    return [df.iloc[start:start + chunk_size] for start in range(0, total_rows, chunk_size)]


def snake_to_camel(snake_str):
    """Convert a snake_case identifier to camelCase (first word kept as-is)."""
    first, *rest = snake_str.split('_')
    return first + ''.join(word.title() for word in rest)


def aggregate_results(group):
    """Collapse a per-id group of rows into one representative row.

    The first row of the group is kept; its 'result' column is replaced with a
    JSON string that merges every parseable 'result' JSON array in the group,
    de-duplicating error types per field (sorted, comma-joined).
    """
    merged = group.iloc[0].copy()
    # field -> set of error-type codes, keyed in first-seen order
    field_errors = defaultdict(set)
    for raw in group['result']:
        try:
            entries = json.loads(raw)
        except json.JSONDecodeError:
            # Skip rows whose result column is not valid JSON
            continue
        for entry in entries:
            field = entry.get('field')
            error_type = entry.get('errorType')
            if field and error_type:
                field_errors[field].add(error_type)
    merged['result'] = json.dumps([
        {'field': field, 'errorType': ','.join(sorted(codes))}
        for field, codes in field_errors.items()
    ])
    return merged


# Summarise one grouped slice of the count table into per-error-type totals.
def aggregate_errors(group):
    """Return a Series with one summed error count per error-type code,
    the comma-joined set of codes seen, and the max count as 'error_number'."""
    type_to_column = {
        '1': 'required_error_number',
        '2': 'dict_error_number',
        '3': 'district_error_number',
        '4': 'date_error_number',
        '5': 'canteen_code_error_number',
        '6': 'canteen_name_error_number',
    }
    summary = {
        column: group.loc[group['error_type'] == code, 'error_number'].sum()
        for code, column in type_to_column.items()
    }
    summary['error_type'] = ','.join(map(str, group['error_type'].unique()))
    # Overall error count is the largest per-type total
    summary['error_number'] = max(summary[column] for column in type_to_column.values())
    return pd.Series(summary)


def generate_empty_result(_row, _field_list):
    """Build the JSON 'result' string for the empty-value check.

    For every field whose helper column ``is_<field>`` equals -1, emit an
    entry with errorType '1' (required field missing); return the JSON array
    as a string.
    """
    flagged = [
        {'field': snake_to_camel(_field), 'errorType': '1'}
        for _field in _field_list
        if str(_row[f'is_{_field}']) == '-1'
    ]
    return json.dumps(flagged, ensure_ascii=False)


def generate_dict_result(_row, _field_list):
    """Build the JSON 'result' string for the dictionary-value check.

    Each entry in *_field_list* is a mapping whose '字段' key names the column;
    fields whose helper column ``is_<field>`` equals -1 are reported with
    errorType '2' (value not in dictionary).
    """
    flagged = []
    for _field_info in _field_list:
        column = _field_info['字段']
        if str(_row[f'is_{column}']) == '-1':
            flagged.append({'field': snake_to_camel(column), 'errorType': '2'})
    return json.dumps(flagged, ensure_ascii=False)


def generate_date_result(_row, _field_list):
    """Build the JSON 'result' string for the date-format check.

    Each entry in *_field_list* is a mapping whose '字段' key names the column;
    fields whose helper column ``is_<field>`` equals -1 are reported with
    errorType '4' (invalid date).
    """
    flagged = []
    for _field_info in _field_list:
        column = _field_info['字段']
        if str(_row[f'is_{column}']) == '-1':
            flagged.append({'field': snake_to_camel(column), 'errorType': '4'})
    return json.dumps(flagged, ensure_ascii=False)


def empty_check(_config, _table_cn_name, _table_name, _field_list):
    """Required-field check (error type '1'): find rows where any listed field is empty.

    :param _config: DB connection config passed through to JdbcConfigUtils.
    :param _table_cn_name: Chinese display name of the table (not used here).
    :param _table_name: physical table name to query.
    :param _field_list: list of column names that must be non-empty.
    :return: ``(_count_df, _final_data_df)`` — per-region error counts, and up to
             5 sample rows per group with a JSON 'result' column describing the
             failing fields.
    """
    if len(_field_list) <= 0:
        return pd.DataFrame(), pd.DataFrame()
    # WHERE fragment template: true when the column is NULL or ''.
    _condition_sql_format = f"""
        ( LENGTH(IFNULL(FIELD_NAME, '')) = 0 )
    """
    _condition_sql_list = []

    # SELECT fragment template: helper column is_<field> (-1 = empty, 1 = ok).
    _check_field_sql_format = f"""
        IF(LENGTH(IFNULL(FIELD_NAME, '')) = 0, -1, 1) AS is_FIELD_NAME
    """
    _check_field_sql_list = []
    _check_field_list = []
    for _field in _field_list:
        _condition_sql_list.append(_condition_sql_format.replace('FIELD_NAME', _field))
        _check_field_sql_list.append(_check_field_sql_format.replace('FIELD_NAME', _field))
        _check_field_list.append(f'is_{_field}')

    _final_condition_sql = '\nor\n'.join(_condition_sql_list)
    _final_check_field_sql = '\n,\n'.join(_check_field_sql_list)

    _is_not_ex_canteen_table = is_not_ex_canteen_table(_table_name)

    # Aggregate count SQL, grouped by region (and canteen where the table has one)
    _count_sql = f"""
        with result_data as (
            SELECT
                province_code,
                province,
                city_code,
                city,
                district_code,
                district
                {',canteen_code,canteen_name' if _is_not_ex_canteen_table else ''}
            FROM 
                {_table_name}
            WHERE 
            ({_final_condition_sql})
            and district_code is not null
            {'and canteen_code is not null' if _is_not_ex_canteen_table else ''}
        )
        SELECT
        province_code,
        province,
        city_code,
        city,
        district_code,
        district,
        {'canteen_code,canteen_name,' if _is_not_ex_canteen_table else ''}
        count(*) as error_number
        FROM
        result_data
        group by province_code, city_code, district_code{', canteen_code' if _is_not_ex_canteen_table else ''}
    """
    _final_count_sql = _count_sql
    _count_df = JdbcConfigUtils.get_table_data(_config, _final_count_sql)

    # Sample-data SQL template; the *_VALUE placeholders are filled per count row below.
    _data_sql = f"""
        ( SELECT
            *,
            {_final_check_field_sql}
        FROM 
            {_table_name}
        WHERE 
        ( {_final_condition_sql} )
        and province_code = 'PROVINCE_CODE_VALUE'
        and city_code = 'CITY_CODE_VALUE'
        and district_code = 'DISTRICT_CODE_VALUE'
        {"and canteen_code = 'CANTEEN_CODE_VALUE'" if _is_not_ex_canteen_table else ''}
        LIMIT 0, 5 )
    """
    _final_data_df_list = []
    # Batch the per-group sample queries, 200 UNIONed selects per statement.
    _chunk_size = 200
    chunks = split_data_frame(_count_df, _chunk_size)
    for idx, chunk in enumerate(chunks):
        _chunk_data_sql_list = []
        # Fill the placeholders for every row of this chunk
        for index, r in chunk.iterrows():
            # NOTE(review): str.replace assumes the code columns come back as
            # strings from the driver — TODO confirm.
            _province_code = r['province_code']
            _city_code = r['city_code']
            _district_code = r['district_code']
            _final_data_sql = _data_sql.replace('PROVINCE_CODE_VALUE', _province_code).replace('CITY_CODE_VALUE',
                                                                                               _city_code).replace(
                'DISTRICT_CODE_VALUE', _district_code)
            if _is_not_ex_canteen_table:
                _canteen_code = r['canteen_code']
                _final_data_sql = _final_data_sql.replace('CANTEEN_CODE_VALUE', _canteen_code)
            _chunk_data_sql_list.append(_final_data_sql)
        _chunk_data_sql = '\n union \n'.join(_chunk_data_sql_list)
        _data_df = JdbcConfigUtils.get_table_data(_config, _chunk_data_sql)
        _final_data_df_list.append(_data_df)
    if len(_final_data_df_list) > 0:
        _final_data_df = pd.concat(_final_data_df_list, axis=0)
        _final_data_df['result'] = _final_data_df.apply(lambda row: generate_empty_result(row, _field_list), axis=1)
        # Drop the helper is_* columns
        _final_data_df.drop(columns=_check_field_list, inplace=True)
    else:
        _final_data_df = pd.DataFrame()
    return _count_df, _final_data_df


def dict_check(_config, _table_cn_name, _table_name, _field_list):
    """Dictionary-value check (error type '2'): find rows whose field values are
    not present in the corresponding sys_dict dictionary.

    :param _config: DB connection config passed through to JdbcConfigUtils.
    :param _table_cn_name: Chinese display name of the table (not used here).
    :param _table_name: physical table name to query.
    :param _field_list: list of mappings with keys '字段' (column name) and
                        '字典' (dict_code in sys_dict).
    :return: ``(_count_df, _final_data_df)`` — per-region error counts, and up to
             5 sample rows per group with a JSON 'result' column.
    """
    if len(_field_list) <= 0:
        return pd.DataFrame(), pd.DataFrame()
    # CTE template: all legal item_values for one dictionary code.
    _temp_table_sql_format = f"""
		temp_FIELD_NAME as (
            SELECT
            sdi.item_value
            FROM
            sys_dict_item sdi
            LEFT JOIN sys_dict sd on sd.id = sdi.dict_id
            WHERE
            sd.dict_code = 'DICT_NAME'
        )
    """
    _temp_table_sql_list = []

    # JOIN template: match the data column against its dictionary CTE.
    _join_sql_format = f"""
        LEFT JOIN temp_FIELD_NAME ON temp_FIELD_NAME.item_value = t.FIELD_NAME
    """
    _join_sql_list = []

    # WHERE fragment template: NULL join result means the value is not in the dict.
    _condition_sql_format = f"""
        temp_FIELD_NAME.item_value IS NULL 
    """
    _condition_sql_list = []

    # SELECT fragment template: helper column is_<field> (-1 = invalid, 1 = ok).
    _check_field_sql_format = f"""
        IF(LENGTH(IFNULL(temp_FIELD_NAME.item_value, '')) = 0, -1, 1) AS is_FIELD_NAME
    """
    _check_field_sql_list = []
    _check_field_list = []
    for _field_info in _field_list:
        _field = _field_info['字段']
        _dict = _field_info['字典']
        _temp_table_sql_list.append(_temp_table_sql_format.replace('FIELD_NAME', _field).replace('DICT_NAME', _dict))
        _join_sql_list.append(_join_sql_format.replace('FIELD_NAME', _field).replace('DICT_NAME', _dict))
        _condition_sql_list.append(_condition_sql_format.replace('FIELD_NAME', _field).replace('DICT_NAME', _dict))
        _check_field_sql_list.append(_check_field_sql_format.replace('FIELD_NAME', _field).replace('DICT_NAME', _dict))
        _check_field_list.append(f'is_{_field}')

    _final_temp_table_sql = '\n,'.join(_temp_table_sql_list)
    _final_join_sql = '\n'.join(_join_sql_list)
    _final_condition_sql = '\nor\n'.join(_condition_sql_list)
    _final_check_field_sql = '\n,\n'.join(_check_field_sql_list)

    _is_not_ex_canteen_table = is_not_ex_canteen_table(_table_name)

    # Aggregate count SQL, grouped by region (and canteen where the table has one)
    _count_sql = f"""
        with 
		{_final_temp_table_sql},
		result_data as (
            SELECT
                t.province_code,
                t.province,
                t.city_code,
                t.city,
                t.district_code,
                t.district
                {',t.canteen_code,t.canteen_name' if _is_not_ex_canteen_table else ''}
            FROM 
                {_table_name} t
            {_final_join_sql}
            WHERE 
            ({_final_condition_sql})
            and t.district_code is not null
            {'and t.canteen_code is not null' if _is_not_ex_canteen_table else ''}
        )
        SELECT
        province_code,
        province,
        city_code,
        city,
        district_code,
        district,
        {'canteen_code,canteen_name,' if _is_not_ex_canteen_table else ''}
        count(*) as error_number
        FROM
        result_data
        group by province_code, city_code, district_code{', canteen_code' if _is_not_ex_canteen_table else ''}
    """
    _final_count_sql = _count_sql
    _count_df = JdbcConfigUtils.get_table_data(_config, _final_count_sql)

    # Shared WITH clause prepended to each chunk of UNIONed sample queries.
    _base_data_sql = f"""
        with 
		{_final_temp_table_sql}
    """
    # Sample-data SQL template; the *_VALUE placeholders are filled per count row below.
    _data_sql = f"""
        (
        SELECT
            t.*,
            {_final_check_field_sql}
        FROM 
            {_table_name} t
        {_final_join_sql}
        WHERE 
        ( {_final_condition_sql} )
        and t.province_code = 'PROVINCE_CODE_VALUE'
        and t.city_code = 'CITY_CODE_VALUE'
        and t.district_code = 'DISTRICT_CODE_VALUE'
        {"and t.canteen_code = 'CANTEEN_CODE_VALUE'" if _is_not_ex_canteen_table else ''}
        LIMIT 0, 5
        )
    """
    _final_data_df_list = []
    # Batch the per-group sample queries, 200 UNIONed selects per statement.
    _chunk_size = 200
    chunks = split_data_frame(_count_df, _chunk_size)
    for idx, chunk in enumerate(chunks):
        _chunk_data_sql_list = []
        # Fill the placeholders for every row of this chunk
        for index, r in chunk.iterrows():
            _province_code = r['province_code']
            _city_code = r['city_code']
            _district_code = r['district_code']
            _final_data_sql = _data_sql.replace('PROVINCE_CODE_VALUE', _province_code).replace('CITY_CODE_VALUE',
                                                                                               _city_code).replace(
                'DISTRICT_CODE_VALUE', _district_code)
            if _is_not_ex_canteen_table:
                _canteen_code = r['canteen_code']
                _final_data_sql = _final_data_sql.replace('CANTEEN_CODE_VALUE', _canteen_code)
            _chunk_data_sql_list.append(_final_data_sql)
        _chunk_data_sql = '\n union \n'.join(_chunk_data_sql_list)
        _final_chunk_data_sql = _base_data_sql + _chunk_data_sql
        _data_df = JdbcConfigUtils.get_table_data(_config, _final_chunk_data_sql)
        _final_data_df_list.append(_data_df)
    if len(_final_data_df_list) > 0:
        _final_data_df = pd.concat(_final_data_df_list, axis=0)
        _final_data_df['result'] = _final_data_df.apply(lambda row: generate_dict_result(row, _field_list), axis=1)
        # Drop the helper is_* columns
        _final_data_df.drop(columns=_check_field_list, inplace=True)
    else:
        _final_data_df = pd.DataFrame()
    return _count_df, _final_data_df


def area_name_check(_config, _table_cn_name, _table_name):
    """District check (error type '3'): find rows whose district_code is empty.

    :param _config: DB connection config passed through to JdbcConfigUtils.
    :param _table_cn_name: Chinese display name of the table (not used here).
    :param _table_name: physical table name to query.
    :return: ``(_count_df, _final_data_df)`` — per-region error counts, and up to
             5 sample rows per group with a JSON 'result' column.
    """
    _is_not_ex_canteen_table = is_not_ex_canteen_table(_table_name)

    # Aggregate count SQL.
    # NOTE(review): `LENGTH(IFNULL(district_code, '')) = 0` combined with
    # `district_code is not null` matches only empty-string codes — confirm intended.
    _count_sql = f"""
        SELECT
        province_code,
        province,
        city_code,
        city,
        district_code,
        district,
        {'canteen_code,canteen_name,' if _is_not_ex_canteen_table else ''}
        count(*) as error_number
        FROM
        {_table_name}
        where LENGTH(IFNULL(district_code, '')) = 0
        and district_code is not null
        {'and canteen_code is not null' if _is_not_ex_canteen_table else ''}
        GROUP BY province_code, city_code, district_code{', canteen_code' if _is_not_ex_canteen_table else ''}
    """
    _final_count_sql = _count_sql
    _count_df = JdbcConfigUtils.get_table_data(_config, _final_count_sql)

    # Sample-data SQL template; the *_VALUE placeholders are filled per count row.
    # BUG FIX: the placeholder was previously 'DISTRICT_VALUE' while the loop
    # below substitutes 'DISTRICT_CODE_VALUE', so the district filter never
    # matched and the literal placeholder leaked into the generated SQL.
    _data_sql = f"""
    (
        SELECT
        *
        FROM
        {_table_name}
        where LENGTH(IFNULL(district_code, '')) = 0 
        and province_code = 'PROVINCE_CODE_VALUE'
        and city_code = 'CITY_CODE_VALUE'
        and district_code = 'DISTRICT_CODE_VALUE'
        {"and canteen_code = 'CANTEEN_CODE_VALUE'" if _is_not_ex_canteen_table else ''}
        LIMIT 0, 5
    )
    """
    _final_data_df_list = []
    # Batch the per-group sample queries, 200 UNIONed selects per statement.
    _chunk_size = 200
    chunks = split_data_frame(_count_df, _chunk_size)
    for idx, chunk in enumerate(chunks):
        _chunk_data_sql_list = []
        # Fill the placeholders for every row of this chunk
        for index, r in chunk.iterrows():
            _province_code = r['province_code']
            _city_code = r['city_code']
            _district_code = r['district_code']
            _final_data_sql = _data_sql.replace('PROVINCE_CODE_VALUE', _province_code).replace('CITY_CODE_VALUE',
                                                                                               _city_code).replace(
                'DISTRICT_CODE_VALUE', _district_code)
            if _is_not_ex_canteen_table:
                _canteen_code = r['canteen_code']
                _final_data_sql = _final_data_sql.replace('CANTEEN_CODE_VALUE', _canteen_code)
            _chunk_data_sql_list.append(_final_data_sql)
        _chunk_data_sql = '\n union \n'.join(_chunk_data_sql_list)
        _data_df = JdbcConfigUtils.get_table_data(_config, _chunk_data_sql)
        _final_data_df_list.append(_data_df)
    if len(_final_data_df_list) > 0:
        _final_data_df = pd.concat(_final_data_df_list, axis=0)
        # Every sampled row gets the same single-entry result: empty district.
        _result_info = [{
            'field': 'district',
            'errorType': '3'
        }]
        _result_info_str = json.dumps(_result_info, ensure_ascii=False)
        _final_data_df['result'] = _result_info_str
    else:
        _final_data_df = pd.DataFrame()
    return _count_df, _final_data_df


def date_check(_config, _table_cn_name, _table_name, _field_list):
    """Date-format check (error type '4'): find rows whose date fields fail
    STR_TO_DATE parsing or do not match the expected regex.

    :param _config: DB connection config passed through to JdbcConfigUtils.
    :param _table_cn_name: Chinese display name of the table (not used here).
    :param _table_name: physical table name to query.
    :param _field_list: list of mappings with keys '字段' (column name),
                        '类型' (MySQL date format) and '类型正则表达式' (regex).
    :return: ``(_count_df, _final_data_df)`` — per-region error counts, and up to
             5 sample rows per group with a JSON 'result' column.
    """
    if len(_field_list) <= 0:
        return pd.DataFrame(), pd.DataFrame()
    # WHERE fragment template: true when the value fails date parsing or the regex.
    _condition_sql_format = f"""
        (STR_TO_DATE(FIELD_NAME, 'DATE_TYPE') IS NULL OR FIELD_NAME NOT REGEXP 'REGEXP_TEXT')
    """
    _condition_sql_list = []

    # SELECT fragment template: helper column is_<field> (-1 = invalid, 1 = ok).
    _check_field_sql_format = f"""
        IF(STR_TO_DATE(FIELD_NAME, 'DATE_TYPE') IS NULL OR  FIELD_NAME NOT REGEXP 'REGEXP_TEXT', -1, 1) AS is_CHECK_NAME
    """
    _check_field_sql_list = []
    _check_field_list = []
    for _field_info in _field_list:
        _field = _field_info['字段']
        _type = _field_info['类型']
        _regexp = _field_info['类型正则表达式']
        if _type == '%Y-%m':
            # Year-month columns: append '-01' so STR_TO_DATE can parse a full date.
            # (Double-escaped backslashes survive both Python and MySQL escaping.)
            _condition_sql_list.append(
                _condition_sql_format.replace(
                    'FIELD_NAME', f"CONCAT({_field}, '-01')").replace('DATE_TYPE',
                                                                      '%Y-%m-%d').replace(
                    'REGEXP_TEXT', '^\\\\d{4}-\\\\d{2}-\\\\d{2}$'))
            _check_field_sql_list.append(
                _check_field_sql_format.replace('FIELD_NAME', f"CONCAT({_field}, '-01')").replace('DATE_TYPE',
                                                                                                  '%Y-%m-%d').replace(
                    'REGEXP_TEXT', '^\\\\d{4}-\\\\d{2}-\\\\d{2}$').replace('CHECK_NAME', _field))
        else:
            _condition_sql_list.append(
                _condition_sql_format.replace('FIELD_NAME', _field).replace('DATE_TYPE', _type).replace('REGEXP_TEXT',
                                                                                                        _regexp))
            _check_field_sql_list.append(
                _check_field_sql_format.replace('FIELD_NAME', _field).replace('DATE_TYPE', _type).replace(
                    'REGEXP_TEXT', _regexp).replace('CHECK_NAME', _field))
        _check_field_list.append(f'is_{_field}')

    _final_condition_sql = '\nor\n'.join(_condition_sql_list)
    _final_check_field_sql = '\n,\n'.join(_check_field_sql_list)

    _is_not_ex_canteen_table = is_not_ex_canteen_table(_table_name)

    # Aggregate count SQL, grouped by region (and canteen where the table has one)
    _count_sql = f"""
        SELECT
        province_code,
        province,
        city_code,
        city,
        district_code,
        district,
        {'canteen_code,canteen_name,' if _is_not_ex_canteen_table else ''}
        count(*) as error_number
        FROM 
            {_table_name}
        WHERE 
        ({_final_condition_sql})
        and district_code is not null
        {'and canteen_code is not null' if _is_not_ex_canteen_table else ''}
        GROUP BY province_code, city_code, district_code{', canteen_code' if _is_not_ex_canteen_table else ''}
    """
    _final_count_sql = _count_sql
    _count_df = JdbcConfigUtils.get_table_data(_config, _final_count_sql)

    # Sample-data SQL template; the *_VALUE placeholders are filled per count row below.
    _data_sql = f"""
        (
            SELECT
                *,
                {_final_check_field_sql}
            FROM 
                {_table_name}
            WHERE 
            ( {_final_condition_sql} )
            and province_code = 'PROVINCE_CODE_VALUE'
            and city_code = 'CITY_CODE_VALUE'
            and district_code = 'DISTRICT_CODE_VALUE'
            {"and canteen_code = 'CANTEEN_CODE_VALUE'" if _is_not_ex_canteen_table else ''}
            LIMIT 0, 5
        )
    """
    _final_data_df_list = []
    # Batch the per-group sample queries, 200 UNIONed selects per statement.
    _chunk_size = 200
    chunks = split_data_frame(_count_df, _chunk_size)
    for idx, chunk in enumerate(chunks):
        _chunk_data_sql_list = []
        # Fill the placeholders for every row of this chunk
        for index, r in chunk.iterrows():
            _province_code = r['province_code']
            _city_code = r['city_code']
            _district_code = r['district_code']
            _final_data_sql = _data_sql.replace('PROVINCE_CODE_VALUE', _province_code).replace('CITY_CODE_VALUE',
                                                                                               _city_code).replace(
                'DISTRICT_CODE_VALUE', _district_code)
            if _is_not_ex_canteen_table:
                _canteen_code = r['canteen_code']
                _final_data_sql = _final_data_sql.replace('CANTEEN_CODE_VALUE', _canteen_code)
            _chunk_data_sql_list.append(_final_data_sql)
        _chunk_data_sql = '\n union \n'.join(_chunk_data_sql_list)
        _data_df = JdbcConfigUtils.get_table_data(_config, _chunk_data_sql)
        _final_data_df_list.append(_data_df)
    if len(_final_data_df_list) > 0:
        _final_data_df = pd.concat(_final_data_df_list, axis=0)
        _final_data_df['result'] = _final_data_df.apply(lambda row: generate_date_result(row, _field_list), axis=1)
        # Drop the helper is_* columns
        _final_data_df.drop(columns=_check_field_list, inplace=True)
    else:
        _final_data_df = pd.DataFrame()
    return _count_df, _final_data_df


def canteen_code_check(_config, _table_cn_name, _table_name):
    """Canteen-code format check (error type '5'): find rows whose canteen_code
    does not match the pattern ``ct_<6 digits>_<letters>`` (case-insensitive).

    :param _config: DB connection config passed through to JdbcConfigUtils.
    :param _table_cn_name: Chinese display name of the table (not used here).
    :param _table_name: physical table name to query.
    :return: ``(_count_df, _final_data_df)`` — per-region error counts, and up to
             5 sample rows per group with a JSON 'result' column.
    """
    _is_not_ex_canteen_table = is_not_ex_canteen_table(_table_name)

    # Aggregate count SQL ({{ }} escapes the regex quantifier inside the f-string)
    _count_sql = f"""
        SELECT
        province_code,
        province,
        city_code,
        city,
        district_code,
        district,
        {'canteen_code,canteen_name,' if _is_not_ex_canteen_table else ''}
        count(*) as error_number
        FROM
        {_table_name}
        where LOWER(canteen_code) not REGEXP '^ct_[0-9]{{6}}_[a-z]+$'
        and district_code is not null
        GROUP BY province_code, city_code, district_code{', canteen_code' if _is_not_ex_canteen_table else ''}
    """
    _final_count_sql = _count_sql
    _count_df = JdbcConfigUtils.get_table_data(_config, _final_count_sql)

    # Sample-data SQL template; the *_VALUE placeholders are filled per count row below.
    _data_sql = f"""
    (
        SELECT
        *
        FROM
        {_table_name}
        WHERE LOWER(canteen_code) not REGEXP '^ct_[0-9]{{6}}_[a-z]+$'
        and province_code = 'PROVINCE_CODE_VALUE'
        and city_code = 'CITY_CODE_VALUE'
        and district_code = 'DISTRICT_CODE_VALUE'
        {"and canteen_code = 'CANTEEN_CODE_VALUE'" if _is_not_ex_canteen_table else ''}
        LIMIT 0, 5
    )
    """
    _final_data_df_list = []
    # Batch the per-group sample queries, 200 UNIONed selects per statement.
    _chunk_size = 200
    chunks = split_data_frame(_count_df, _chunk_size)
    for idx, chunk in enumerate(chunks):
        _chunk_data_sql_list = []
        # Fill the placeholders for every row of this chunk
        for index, r in chunk.iterrows():
            _province_code = r['province_code']
            _city_code = r['city_code']
            _district_code = r['district_code']
            _final_data_sql = _data_sql.replace('PROVINCE_CODE_VALUE', _province_code).replace('CITY_CODE_VALUE',
                                                                                               _city_code).replace(
                'DISTRICT_CODE_VALUE', _district_code)
            if _is_not_ex_canteen_table:
                _canteen_code = r['canteen_code']
                _final_data_sql = _final_data_sql.replace('CANTEEN_CODE_VALUE', _canteen_code)
            _chunk_data_sql_list.append(_final_data_sql)
        _chunk_data_sql = '\n union \n'.join(_chunk_data_sql_list)
        _data_df = JdbcConfigUtils.get_table_data(_config, _chunk_data_sql)
        _final_data_df_list.append(_data_df)
    if len(_final_data_df_list) > 0:
        _final_data_df = pd.concat(_final_data_df_list, axis=0)
        # Every sampled row gets the same single-entry result: bad canteen code.
        _result_info = [{
            'field': 'canteenCode',
            'errorType': '5'
        }]
        _result_info_str = json.dumps(_result_info, ensure_ascii=False)
        _final_data_df['result'] = _result_info_str
    else:
        _final_data_df = pd.DataFrame()
    return _count_df, _final_data_df


def canteen_name_check(_config, _table_cn_name, _table_name):
    """Canteen-name consistency check (error type '6'): find canteen codes that
    map to more than one distinct canteen_name within the same city.

    :param _config: DB connection config passed through to JdbcConfigUtils.
    :param _table_cn_name: Chinese display name of the table (not used here).
    :param _table_name: physical table name to query.
    :return: ``(_count_df, _final_data_df)`` — per-region error counts, and up to
             5 sample rows per group with a JSON 'result' column.
    """
    _is_not_ex_canteen_table = is_not_ex_canteen_table(_table_name)

    # Aggregate count SQL: temp1 finds (city, canteen_code) pairs with >1 name,
    # then rows joining those pairs are counted per region/canteen.
    _count_sql = f"""
        with temp1 as (
            SELECT
            city_code,
			canteen_code,
            count(DISTINCT canteen_name) as num
            FROM
            {_table_name}
            where district_code is not null
            GROUP BY city_code, canteen_code
			Having num > 1
        )
        SELECT
        t1.province_code,
        t1.province,
        t1.city_code,
        t1.city,
        t1.district_code,
        t1.district,
        {'t1.canteen_code,t1.canteen_name,' if _is_not_ex_canteen_table else ''}
        count(*) as error_number
        FROM
        {_table_name} t1
        INNER JOIN temp1 on temp1.city_code = t1.city_code and temp1.canteen_code = t1.canteen_code 
        GROUP BY t1.province_code, t1.city_code, t1.district_code{', t1.canteen_code' if _is_not_ex_canteen_table else ''}
    """
    _final_count_sql = _count_sql
    _count_df = JdbcConfigUtils.get_table_data(_config, _final_count_sql)

    # Sample-data SQL template; the *_VALUE placeholders are filled per count row below.
    # NOTE(review): unlike the count SQL, this sample query does not re-apply the
    # duplicate-name condition — it relies on the group keys alone. Confirm intended.
    _data_sql = f"""
    (   
        SELECT
        *
        FROM
        {_table_name}
        where 
        province_code = 'PROVINCE_CODE_VALUE'
        and city_code = 'CITY_CODE_VALUE'
        and district_code = 'DISTRICT_CODE_VALUE'
        {"and canteen_code = 'CANTEEN_CODE_VALUE'" if _is_not_ex_canteen_table else ''}
        LIMIT 0, 5
    )  
    """
    _final_data_df_list = []
    # Batch the per-group sample queries, 200 UNIONed selects per statement.
    _chunk_size = 200
    chunks = split_data_frame(_count_df, _chunk_size)
    for idx, chunk in enumerate(chunks):
        _chunk_data_sql_list = []
        # Fill the placeholders for every row of this chunk
        for index, r in chunk.iterrows():
            _province_code = r['province_code']
            _city_code = r['city_code']
            _district_code = r['district_code']
            _final_data_sql = _data_sql.replace('PROVINCE_CODE_VALUE', _province_code).replace('CITY_CODE_VALUE',
                                                                                               _city_code).replace(
                'DISTRICT_CODE_VALUE', _district_code)
            if _is_not_ex_canteen_table:
                _canteen_code = r['canteen_code']
                _final_data_sql = _final_data_sql.replace('CANTEEN_CODE_VALUE', _canteen_code)
            _chunk_data_sql_list.append(_final_data_sql)
        _chunk_data_sql = '\n union \n'.join(_chunk_data_sql_list)
        _data_df = JdbcConfigUtils.get_table_data(_config, _chunk_data_sql)
        _final_data_df_list.append(_data_df)
    if len(_final_data_df_list) > 0:
        _final_data_df = pd.concat(_final_data_df_list, axis=0)
        # Every sampled row gets the same single-entry result: inconsistent name.
        _result_info = [{
            'field': 'canteenName',
            'errorType': '6'
        }]
        _result_info_str = json.dumps(_result_info, ensure_ascii=False)
        _final_data_df['result'] = _result_info_str
    else:
        _final_data_df = pd.DataFrame()
    return _count_df, _final_data_df


if __name__ == '__main__':
    # Input (config JSON) and output (Excel) directories, relative to CWD.
    _data = 'data'
    _result = 'result'
    _config = ProvincialPlatformConfigDev()

    # Table definitions and province/city/district reference data.
    _table_json = load_json(os.path.join(_data, '校验数据表.json'))
    _area_json = load_json(os.path.join(_data, '省市区校验.json'))

    # Result list of tables and their error situations
    # table name, city, error type, count
    _error_info_list = []

    _count_df_list = []
    # Process each configured table in turn
    for _table_cn_name in _table_json:
        _table_data = _table_json[_table_cn_name]
        _table_name = _table_data['表名']
        _check_table_name = _table_data['校验结果表名']
        # Clear previous results from the check-result table first
        JdbcConfigUtils.truncate_table_data(_config.DATABASE_NAME, _check_table_name, _config)

        _save_result_path = os.path.join(_result)
        if not os.path.exists(_save_result_path):
            os.makedirs(_save_result_path, exist_ok=True)

        _data_df_list = []

        # Empty-value check (error type '1')
        _empty_count_df, _empty_data_df = empty_check(_config, _table_cn_name, _table_name, _table_data['字段校验'])
        _empty_count_df['table_name'] = _table_cn_name
        _empty_count_df['error_type'] = '1'
        _count_df_list.append(_empty_count_df)
        _data_df_list.append(_empty_data_df)

        # Dictionary-value check (error type '2')
        _dict_count_df, _dict_data_df = dict_check(_config, _table_cn_name, _table_name, _table_data['字典校验'])
        _dict_count_df['table_name'] = _table_cn_name
        _dict_count_df['error_type'] = '2'
        _count_df_list.append(_dict_count_df)
        _data_df_list.append(_dict_data_df)

        # District check (error type '3')
        _area_count_df, _area_data_df = area_name_check(_config, _table_cn_name, _table_name)
        _area_count_df['table_name'] = _table_cn_name
        _area_count_df['error_type'] = '3'
        _count_df_list.append(_area_count_df)
        _data_df_list.append(_area_data_df)

        # Date-format check (error type '4')
        _date_count_df, _date_data_df = date_check(_config, _table_cn_name, _table_name, _table_data['日期校验'])
        _date_count_df['table_name'] = _table_cn_name
        _date_count_df['error_type'] = '4'
        _count_df_list.append(_date_count_df)
        _data_df_list.append(_date_data_df)

        if _table_data['食堂校验']:
            # Canteen-code format check (error type '5')
            _canteen_code_count_df, _canteen_code_data_df = canteen_code_check(_config, _table_cn_name, _table_name)
            _canteen_code_count_df['table_name'] = _table_cn_name
            _canteen_code_count_df['error_type'] = '5'
            _count_df_list.append(_canteen_code_count_df)
            _data_df_list.append(_canteen_code_data_df)

            # Canteen-name consistency check (error type '6')
            _canteen_name_count_df, _canteen_name_data_df = canteen_name_check(_config, _table_cn_name, _table_name)
            _canteen_name_count_df['table_name'] = _table_cn_name
            _canteen_name_count_df['error_type'] = '6'
            _count_df_list.append(_canteen_name_count_df)
            _data_df_list.append(_canteen_name_data_df)

        if len(_data_df_list) <= 0:
            continue
        try:
            _data_df = pd.concat(_data_df_list, axis=0)
        except Exception as e:
            print(e)
            continue
        if _data_df.shape[0] > 0:
            # Merge the per-check sample rows by id via groupby + apply
            _data_df = _data_df.groupby('id').apply(aggregate_results).reset_index(drop=True)
            _data_df.to_excel(os.path.join(_save_result_path, f'{_table_cn_name}.xlsx'), index=False)
            JdbcConfigUtils.insert_table_data(_config.DATABASE_NAME, _check_table_name, _data_df, _config)

    _count_df = pd.concat(_count_df_list, axis=0)
    # Group the combined counts and pivot them into per-error-type columns
    grouped_df = _count_df.groupby(
        ['province_code', 'province', 'city_code', 'city', 'district_code', 'district', 'canteen_code', 'canteen_name',
         'table_name'], dropna=False
    ).apply(aggregate_errors).reset_index()
    grouped_df.to_excel(os.path.join(_result, '整体统计.xlsx'), index=False)


