from __future__ import annotations
import os
import re
import time
import h5py
import traceback
import numpy as np
import pandas as pd
from itertools import chain
from functools import partial
from collections import Counter
from pyexcelerate import Workbook
from decimal import Decimal, ROUND_HALF_UP
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from PyQt6.QtCore import QThread, pyqtSignal, QCoreApplication

import _config as cfg
import _cal as cal
import _plot_html as plot_html


def read_one_file_gkd(path_csv):
    """ Read raw national-control-site data from a single csv file.

        path_csv: absolute path of the csv file

        return: dict {'PM2_5': pd.DataFrame, 'PM10': ...} — one DataFrame per
                species (datetime index, one column per site code), or None
                when the csv contains only a header row.

    Single process.
    2023-08-27 v1
    2023-10-09 v2 skip raw files that contain only a header (empty DataFrame)
    """

    # Read the csv file (rate-limiting step #1)
    df1 = pd.read_csv(path_csv, engine='c', low_memory=False, encoding='utf-8')

    # Header-only file -> nothing to do (added in v2)
    if df1.empty:
        return None

    # Drop duplicated columns (pandas suffixes duplicates with '.1').
    # Collect the names first instead of deleting while iterating df1.columns.
    dup_cols = [col for col in df1.columns if '.1' in col]
    if dup_cols:
        df1.drop(columns=dup_cols, inplace=True)

    title0 = ['datetime', 'AQI', 'NO2', 'SO2', 'CO', 'O3', 'PM2.5', 'PM10']
    # Keep only rows whose 'type' is one of the wanted species
    df2 = df1[df1['type'].isin(title0[1:])].copy()

    # Build the datetime column from 'date' (YYYYMMDD) and 'hour'
    df2['hour'] = [str(i).rjust(2, '0') for i in df2['hour']]
    df2['datehour'] = df2['date'].map(str) + df2['hour']
    df2['datetime'] = [i[:4] + '-' + i[4:6] + '-' + i[6:8] + ' ' + i[8:10] + ':00:00' for i in df2['datehour']]

    # Drop the helper columns; keep datetime, type and the site columns
    df2 = df2.loc[:, ['datetime', 'type'] + df1.columns.values.tolist()[3:]]

    # Parse the datetime strings (stored later as timestamps in the h5 file)
    df2['datetime'] = pd.to_datetime(df2['datetime'], format='%Y-%m-%d %H:%M:%S')

    df2.set_index('datetime', inplace=True)

    # Split into one DataFrame per species
    dict_species = {}
    for specie in title0[1:]:
        df3 = df2[df2['type'] == specie].copy()
        df3.drop(columns=['type'], axis=1, inplace=True)

        # 'PM2.5' is stored under the dict key 'PM2_5'
        if specie == 'PM2.5':
            dict_species['PM2_5'] = df3
        else:
            dict_species[specie] = df3

    return dict_species


def df2h5(data_: pd.DataFrame, path_hdf5):
    """ Save one full year of national-control-site data (pd.DataFrame) into
        the hdf5 database.

        data_: DataFrame with a datetime index, one column per site
        path_hdf5: target h5 file path

    No return value. The dataset for the year is replaced if it already exists.
    2023-08-27 v1
    """

    # Work on a copy so the caller's DataFrame is not mutated by the
    # in-place dropna/sort below
    df_data = data_.copy()

    # Drop rows that are entirely NaN
    df_data.dropna(how='all', axis=0, inplace=True)

    # Drop columns that are entirely NaN
    df_data.dropna(how='all', axis=1, inplace=True)

    # Sort by column name (site code)
    df_data.sort_index(axis=1, inplace=True)

    # Down-cast to float32 to save storage space
    df_data = df_data.astype(np.float32)

    """ write hdf5 """
    # Context manager guarantees the file is closed even if a write fails
    with h5py.File(path_hdf5, 'a') as f:

        # Year covered by the data (all rows assumed to be the same year)
        year_ = str(df_data.index[0].year)

        # Replace an existing dataset for the same year
        if year_ in f.keys():
            del f[year_]

        # Create the dataset (shuffle filter + lzf compression);
        # shuffle expects a bool, not the former truthy string 'T'
        f.create_dataset(year_, data=df_data.to_numpy(), shuffle=True, compression='lzf')

        # Attribute: column names (site codes)
        f[year_].attrs['columns'] = df_data.columns.tolist()

        # Attribute: datetime index stored as unix timestamps (int32)
        f[year_].attrs['datetime'] = (df_data.index.astype('int64') // 10 ** 9).astype('int32')


class Csv2Hdf(QThread):
    """ 从原始数据（*.csv）生成hdf5数据库 """

    # 信号槽：状态值 {'text': , 'value': }
    signal_progress = pyqtSignal(dict)

    def __init__(self, dir_csv, dir_h5):

        super().__init__()

        self.dir_csv = dir_csv
        self.dir_h5 = dir_h5

        # 物种列表
        self.list_species = ['AQI', 'NO2', 'SO2', 'CO', 'O3', 'PM2_5', 'PM10']

        # 读取原始数据csv文件路径列表
        list_files_csv = [os.path.join(self.dir_csv, i)
                          for i in os.listdir(self.dir_csv)
                          if os.path.splitext(i)[1] == '.csv'
                          ]
        # print(list_files_csv)

        # 原始文件对应的日期，pd.DatetimeIndex
        dt_index = pd.to_datetime(
            arg=[os.path.splitext(i)[0].split('_')[-1] for i in list_files_csv],
            format='%Y%m%d'
        )

        # 转化为pd.Series
        self.series_csv = pd.Series(data=list_files_csv, index=dt_index)

        # 进度条值-总步骤
        self.total_step = dt_index.shape[0] + len(self.list_species) * (dt_index[-1].year - dt_index[0].year + 1) * 2

        # 当前步骤
        self.current_step = 0

    def done_callback(self, future_=None, text_=''):

        """ 每一个读取数据进程结束后调用 """

        # 当前进度值
        self.current_step += 1

        progress_value = self.current_step * 100 / self.total_step
        print('已完成:%.2f %%' % progress_value, end='\r')

        if 99.5 < progress_value < 100:
            progress_value = 99

        # 发送状态信号
        self.signal_progress.emit({'text': text_, 'value': progress_value})

    def run(self):

        # 按年处理保存数据
        for year, series_year in self.series_csv.groupby(by=self.series_csv.index.year):

            # print(year)

            # 目标年csv文件列表
            list_files_csv_year = series_year.to_list()

            # 目标年数据临时保存至列表
            list_temp_year = []

            """ 读取数据-进程池 """
            # 发送状态信号
            self.signal_progress.emit({'text': '提交进程池任务...', 'value': None})

            with ProcessPoolExecutor(max_workers=4) as pool:
                for file in list_files_csv_year:
                    # 提交任务
                    future_file = pool.submit(read_one_file_gkd, file)

                    # 回调
                    future_file.add_done_callback(
                        partial(self.done_callback, text_='读取数据完成：' + file)
                    )  # 使用partial避免闭包的问题

                    # 结果存入列表
                    list_temp_year.append(future_file)

            # 提取进程池运行完的数据
            list_temp_year = [i.result() for i in list_temp_year]
            
            # 剔除其中为None的数据
            list_temp_year = [i for i in list_temp_year if i is not None]

            # 整理数据
            dict_temp_year = dict()
            for s in self.list_species:

                # 待合并数据
                list_df_s = [d[s] for d in list_temp_year]

                # 找到所有不同的列名，并排序
                unique_columns = sorted(set(chain(*[df.columns for df in list_df_s])))

                # 对每个DataFrame进行重新索引，将列扩充，填充缺失值为NaN
                list_df_s = [df.reindex(columns=unique_columns) for df in list_df_s]

                df_s = pd.DataFrame(
                    data=np.vstack([df.to_numpy() for df in list_df_s]),
                    index=np.hstack([df.index for df in list_df_s]),
                    columns=unique_columns,
                )

                # 将CO数据转化为μg/m3
                # if s == 'CO':
                # df_s = df_s * 1000

                # 存入临时字典
                if s == 'PM2_5':
                    dict_temp_year['PM2.5'] = df_s
                else:
                    dict_temp_year[s] = df_s

                # 发送状态信号
                self.done_callback(None, '合并数据完成：' + s)

            # 保存数据
            for s in dict_temp_year.keys():
                # 文件路径
                path_s = os.path.join(self.dir_h5, s + '.h5')

                # 写入h5
                df2h5(data_=dict_temp_year[s], path_hdf5=path_s)

                # 发送状态信号
                self.done_callback(None, '保存数据完成：' + path_s)

        # 延迟0.5s
        self.msleep(500)

        # 发送状态信号
        self.signal_progress.emit({'text': '完成！', 'value': 1000})


def get_datetime_range(species: str):
    """ Return the datetime range covered by the hdf5 database of one species.

        species: species name, e.g. 'O3'

        return: (dt_start, dt_end) as pd.Timestamp

        raises FileNotFoundError when the species' database file is missing
    """

    # Database file path
    path_hdf5 = os.path.join(cfg.dir_hdf5, species + '.h5')

    # Make sure the database file exists
    if not os.path.exists(path_hdf5):
        raise FileNotFoundError('数据不存在: %s' % path_hdf5)

    # Context manager so the file handle is always released
    # (the handle was previously never closed)
    with h5py.File(path_hdf5, 'r') as f:

        # Dataset names are 4-digit years; h5py lists keys in sorted order,
        # which for 4-digit years equals chronological order
        list_year = list(f.keys())

        # First and last year
        year_min, year_max = list_year[0], list_year[-1]

        # Start / end timestamps (stored as unix seconds in the attrs)
        dt_start = pd.to_datetime(f[year_min].attrs['datetime'][0], unit='s')
        dt_end = pd.to_datetime(f[year_max].attrs['datetime'][-1], unit='s')

    # Return the range
    return dt_start, dt_end


def round_accurately(num, n_digits):
    """ Accurate half-up rounding for floats.

        Works around Python's banker's rounding, where round(1.315, 2)
        yields 1.31 instead of the expected 1.32.

        num: the float to round
        n_digits: number of decimal places to keep

        return: decimal.Decimal rounded to n_digits places

    2022-10-20    v1.0
    """

    # Quantizer with exponent -n_digits, e.g. Decimal('0.01') for 2 digits
    quantizer = Decimal(1).scaleb(-n_digits)

    # Go through str() so the Decimal sees the printed value, not the
    # binary float representation
    return Decimal(str(num)).quantize(quantizer, rounding=ROUND_HALF_UP)


class SiteInfo:
    """Read the monitoring-site information csv and expose it as lookup dicts.

        File format (gbk-encoded csv; columns are: site code, site name, city,
        longitude, latitude, background flag, province, prefecture-level city):

        监测点编码,监测点名称,城市,经度,纬度,对照点,省级行政区,地级行政区
        1001A,万寿西宫,北京,116.3621,39.8784,N,北京市,北京市
        1002A,定陵(对照点),北京,116.2202,40.2915,Y,北京市,北京市
        ...

    2022.08.16  v1.0
    2022.10.08  moved from ml_io.py; updated only here afterwards
    2022.11.08  copied from ml_geo.py; maintained separately afterwards
    2022.11.08  added the province -> city mapping
    2023.05.12  moved from pkg_mean.py; mainly updated here afterwards

    Single process.
    """

    def __init__(self, path_site_list=''):
        # path_site_list: optional csv path; falls back to cfg.path_site_list

        # Read the site list
        if path_site_list:
            self.path_site_list = path_site_list
        else:
            # self.path_site_list = os.path.join(os.getcwd(), "china_sites_hdf5", "NewSiteList.csv")
            # self.path_site_list = "E:\\bigdata\\gkd\\china_sites_hdf5\\NewSiteList.csv"
            self.path_site_list = cfg.path_site_list

        self.df_info = pd.read_csv(self.path_site_list, engine="c", low_memory=False, encoding="gbk")

        # Site code -> coordinates {'1001A': [latitude, longitude]}
        list_code_all = self.df_info.loc[:, "监测点编码"].to_numpy()
        list_coordinate = self.df_info.loc[:, ["纬度", "经度"]].to_numpy()
        self.dict_code2coordinate = dict(zip(list_code_all, list_coordinate))
        # print('self.dict_code2coordinate:\n', self.dict_code2coordinate)

        # Site code -> site name {'1001A': '万寿西宫'}
        list_code_name = self.df_info.loc[:, "监测点名称"].to_numpy().tolist()
        self.dict_code2name = dict(zip(list_code_all, list_code_name))
        # print('self.dict_code2name:\n', self.dict_code2name)

        # Site code -> province {'1001A': '北京市', '1047A': '河北省'}
        # NOTE: municipality names appear both as province and as city
        list_code_province = self.df_info.loc[:, "省级行政区"].to_numpy().tolist()
        self.dict_code2province = dict(zip(list_code_all, list_code_province))
        # print('self.dict_code2province:\n', self.dict_code2province)

        # Site code -> prefecture-level city {'1001A': '北京市', '1047A': '邯郸市'}
        # list_code_city = self.df_info.loc[:, '地级行政区'].to_numpy().tolist()
        self.list_city_china = self.df_info.loc[:, "地级行政区"].to_numpy().tolist()
        self.dict_code2city = dict(zip(list_code_all, self.list_city_china))
        # print('self.dict_code2city:\n', self.dict_code2city)

        # Province -> site list (disabled)
        # {'北京市': ['1001A', '1002A', '1003A', ...], '河北省':['1028A', '1029A', '1030A', ...]}
        # self.dict_province2code = dict([[i, j.loc[:, '监测点编码'].to_numpy()] for i, j in self.df_info.groupby(self.df_info['省级行政区'])])

        # City -> all its site codes {'北京市': ['1001A', '1002A', '1003A', ...]}
        self.dict_city2code = self.df_info.groupby(self.list_city_china).apply(lambda x: x.loc[:, "监测点编码"].to_numpy()).to_dict()

        # Site code -> is background/reference site {'1001A': False, '1002A': True, ... }
        list_code_background = self.df_info.loc[:, "对照点"].to_numpy()
        list_code_background = [True if i == "Y" else False for i in list_code_background]
        self.dict_code2background = dict(zip(list_code_all, list_code_background))
        # print('self.dict_code2background:', self.dict_code2background)

        # City -> non-background sites (disabled)
        # self.dict_city2non_background = {key: [i for i in self.dict_city2code[key] if i not in self.dict_code2background[key]] for key in self.dict_city2code.keys()}

        # All cities nationwide, de-duplicated but keeping file order
        # (key= is bound to the pre-dedup list before the reassignment)
        self.list_city_china = sorted(set(self.list_city_china), key=self.list_city_china.index)

        # All site codes nationwide, grouped in city order
        self.list_code_china = list(chain(*[self.dict_city2code[i] for i in self.list_city_china]))
        # print(self.list_code_china)

        # All provinces nationwide and the province -> city mapping
        self.list_province_china = self.df_info.loc[:, "省级行政区"].to_numpy()  # with duplicates
        self.dict_province2city = self.df_info.groupby(self.list_province_china).apply(lambda x: np.unique(x.loc[:, "地级行政区"].to_numpy()).tolist()).to_dict()  # province -> city list
        self.list_province_china = self.list_province_china[np.sort(np.unique(self.list_province_china, return_index=True)[1])]  # de-duplicate, keep first-seen order

        # The 74 cities of the first batch implementing the Ambient Air
        # Quality Standard (GB3095-2012) in 2012
        self.list_city_74 = [
            '北京市', '天津市', '石家庄市', '唐山市', '秦皇岛市', '邯郸市', '邢台市', '保定市',
            '张家口市', '承德市', '沧州市', '廊坊市', '衡水市', '太原市', '呼和浩特市', '沈阳市',
            '大连市', '长春市', '哈尔滨市', '上海市', '南京市', '无锡市', '徐州市', '常州市', '苏州市',
            '南通市', '连云港市', '淮安市', '盐城市', '扬州市', '镇江市', '泰州市', '宿迁市', '杭州市',
            '宁波市', '温州市', '嘉兴市', '湖州市', '绍兴市', '金华市', '衢州市', '舟山市', '台州市',
            '丽水市', '合肥市', '福州市', '厦门市', '南昌市', '济南市', '青岛市', '郑州市', '武汉市',
            '长沙市', '广州市', '深圳市', '珠海市', '佛山市', '江门市', '肇庆市', '惠州市', '东莞市',
            '中山市', '南宁市', '海口市', '重庆市', '成都市', '贵阳市', '昆明市', '拉萨市', '西安市',
            '兰州市', '西宁市', '银川市', '乌鲁木齐市'
        ]

        # Beijing-Tianjin-Hebei (BTH)
        self.list_city_jjj = ['北京市', '天津市'] + self.dict_province2city['河北省']

        # The "2+26" cities (consistent with the national environmental
        # bulletin; 28 cities in total)
        self.list_city_2p26 = [
            "北京市", "天津市", "石家庄市", "唐山市",
            "廊坊市", "保定市", "沧州市", "衡水市",
            "邢台市", "邯郸市", "太原市", "阳泉市",
            "长治市", "晋城市", "济南市", "淄博市",
            "济宁市", "德州市", "聊城市", "滨州市",
            "菏泽市", "郑州市", "开封市", "安阳市",
            "鹤壁市", "新乡市", "焦作市", "濮阳市",
        ]
        self.list_code_2p26 = np.hstack([self.dict_city2code[i] for i in self.list_city_2p26])
        # print('self.list_code_2p26:\n', self.list_code_2p26)

        # Pearl River Delta (consistent with the bulletin; 9 cities)
        self.list_city_prd = [
            "广州市", "深圳市", "珠海市",
            "佛山市", "中山市", "江门市",
            "东莞市", "惠州市", "肇庆市",
        ]

        # Yangtze River Delta (consistent with the bulletin; 41 cities)
        list_province_yrd = ["上海市", "江苏省", "浙江省", "安徽省"]
        self.list_city_yrd = list(chain(*[self.dict_province2city[i] for i in list_province_yrd]))

        # Sichuan Basin
        self.list_city_scb = [
            "重庆市", "成都市", "绵阳市", "泸州市",
            "南充市", "自贡市", "德阳市", "广元市",
            "遂宁市", "内江市", "乐山市", "宜宾市",
            "广安市", "达州市", "雅安市", "巴中市",
            "眉山市", "资阳市", "遵义市", "毕节市",
        ]

        # Fenwei Plain (consistent with the bulletin; 11 cities)
        self.list_city_fwp = [
            "晋中市", "运城市", "临汾市", "吕梁市",
            "洛阳市", "三门峡市", "西安市", "铜川市",
            "宝鸡市", "咸阳市", "渭南市",
        ]


def read_gkd_by_file(path_h5: str, sites: list, date_range: list):
    """ Read national-control-site hdf5 data: one species, multiple sites.

        The per-year datasets are read concurrently with a ThreadPoolExecutor
        (the old docstring said "进程池"/process pool — it is a thread pool).

        path_h5: h5 file path
        sites: site code list ['1001A', '1002A', ...]
        date_range: date range, ['2014-05-13', '2020-10-28']

        return: pd.DataFrame (datetime index, one column per site)

    2023-08-27 v1
    """

    def read_one_dataset(year_):
        """ Read the dataset of one year. """

        # Dataset of the target year
        dset_ = f[year_]

        # Time index (unix seconds stored in the attrs)
        index_ = dset_.attrs['datetime']

        # Column names stored in the hdf5 file
        columns_ = dset_.attrs['columns']

        # Intersection of the requested sites and the stored columns
        columns_to_read = np.intersect1d(columns_, sites)

        # Positional indices of those columns
        columns_index = np.where(np.isin(columns_, columns_to_read))[0]

        # Read the data (rate-limiting step, already optimized)
        array_data = dset_[()][..., columns_index]

        # To pd.DataFrame
        df_year = pd.DataFrame(
            data=array_data,
            columns=columns_to_read,
            index=pd.to_datetime(index_, unit='s'),
        )

        return df_year

    # sites: list -> np.1darray
    sites = np.array(sites)

    # Date range
    dt_start, dt_end = [pd.to_datetime(i) for i in date_range]

    # Year list (dataset names)
    list_year = map(str, list(range(dt_start.year, dt_end.year + 1)))

    # Context managers guarantee that the file handle and the thread pool
    # are released even if a read fails (previously neither was closed)
    with h5py.File(name=path_h5, mode="r", swmr=True) as f:

        # Read all years concurrently
        with ThreadPoolExecutor(max_workers=100) as pool:
            list_df = list(pool.map(read_one_dataset, list_year))

    # Concatenate the years
    df_data = pd.concat(objs=list_df, axis=0)

    # Index name
    df_data.index.name = "datetime"

    # Clip to the requested date range
    df_data = df_data.loc[date_range[0]: date_range[1], ]

    # Return the data
    return df_data


def read_gkd_by_sites(dir_h5: str, species: list, sites: list, date_range: list):
    """ Read national-control-site hdf5 data: multiple species, multiple sites.

        dir_h5: directory containing the hdf5 files
        species: species name list, PM2.5 / PM10 / SO2 / NO2 / CO / O3 / AQI
        sites: site code list ['1001A', '1002A', ...]
        date_range: date range, ['2014-05-13', '2020-10-28']

        return: dict, {'PM2.5': df1, 'PM10': df2, ...}

    ProcessPoolExecutor (one task per species); runs in the current process
    when only one species is requested.
    2023-08-27 v1
    """

    # One h5 file per species
    dict_path_h5 = {s: os.path.join(dir_h5, s + '.h5') for s in species}

    """ read data - single process """
    if len(species) == 1:
        dict_result = {species[0]: read_gkd_by_file(path_h5=dict_path_h5[species[0]],
                                                    sites=sites,
                                                    date_range=date_range)
                       }

        return dict_result

    """ read data - process pool """
    # The context manager shuts the pool down and joins the workers
    # (the pool was previously never shut down)
    with ProcessPoolExecutor(max_workers=10) as pool:

        # One argument tuple per species
        args_ = [(dict_path_h5[s], sites, date_range) for s in species]

        # Submit and collect
        list_result = list(pool.map(read_gkd_by_file, *zip(*args_)))

    # Map species -> result
    dict_result = dict(zip(species, list_result))

    # Return the data
    return dict_result


class ReadGkd(QThread):
    """ Read data from the gkd database (*.h5) and compute statistics.

        dir_h5: directory containing the hdf5 files
        species: species name list, PM2.5 / PM10 / SO2 / NO2 / CO / O3 / AQI
        date_range: date range, ['2014-05-13', '2020-10-28']
        dict_stat_standard: validity thresholds for the statistics, e.g.:
            {
                "min_num_annual": 200,  # min days for an annual mean/percentile (national standard: 324)
                "min_num_seasonal": 60,  # min days for a seasonal mean/percentile (no standard; default 60)
                "min_num_monthly": 27,  # min days for a monthly mean/percentile (standard: 27, 25 in February)
                "min_num_daily": 20,  # min hourly values for a daily mean (standard: 20)
                "min_num_8h": 10,  # min 8h moving averages for an MDA8 (standard: 14)
                "min_num_moving_avg": 6,  # min hourly values per 8h moving average (standard: 6)
            }

        dict_percentile: dict, {'O3': 90, ...}; when given for a species the
            annual value uses that percentile instead of the plain mean
        site_info: SiteInfo() instance
        list2export: provinces / cities / sites to export
        site_type: site types used for province/city statistics:
            'urban', 'background' or 'both'
        time_resolution: output resolution: 'YS' year, 'QS-MAR' season
            (Mar-May = spring), 'MS' month, 'D' day, 'H' hour, 'S' second
        region_level: aggregation level: 'province', 'city' or 'site'
        o3_mda8: whether to use the daily max 8h mean (MDA8) for O3

    ThreadPoolExecutor.
    2023-08-30 v1
    """

    # Signal: progress status {'text': , 'value': }
    signal_progress = pyqtSignal(dict)

    # Signal: error message (formatted traceback)
    signal_error = pyqtSignal(str)

    def __init__(self,
                 dir_h5: str,
                 speices: list,
                 date_range: list,
                 dict_stat_standard: dict,
                 dict_percentile: dict,
                 site_info,
                 list2export: list,
                 site_type='urban',
                 time_resolution='H',
                 region_level='site',
                 o3_mda8=True,
                 ):
        # NOTE: the parameter is spelled `speices` (sic); kept as-is so
        # existing keyword callers do not break.

        super().__init__()

        self.dir_h5 = dir_h5
        self.species = speices
        self.date_range = date_range
        self.dict_stat_standard = dict_stat_standard
        self.dict_percentile = dict_percentile
        self.si = site_info
        self.list2export = list2export
        self.site_type = site_type
        self.time_resolution = time_resolution
        self.region_level = region_level
        self.o3_mda8 = o3_mda8

        # Resolve which sites belong to which province/city
        self.site_dict()

        # Final data (filled by run())
        self.data = None

    def site_dict(self):
        """ Group the requested sites by province / city. """

        if self.region_level == 'province':
            self.dict_sites, self.sites = get_site_dict(provinces=self.list2export, site_type=self.site_type, site_info=self.si)
        elif self.region_level == 'city':
            self.dict_sites, self.sites = get_site_dict(cities=self.list2export, site_type=self.site_type, site_info=self.si)
        elif self.region_level == 'site':
            self.dict_sites, self.sites = get_site_dict(sites=self.list2export, site_info=self.si)

    def _stat_args(self, with_sites: bool):
        """ Build one argument tuple per species for the cal_gkd_* helpers.

            with_sites: also include self.dict_sites in each tuple
                        (needed by the city/region helpers)
        """

        args_ = []
        for s in self.species:

            # Percentile (None -> plain mean)
            percentile_s = self.dict_percentile.get(s)

            # MDA8 only applies to O3
            mda8_s = self.o3_mda8 if s == 'O3' else False

            if with_sites:
                args_.append((self.data[s], self.time_resolution, mda8_s, self.dict_stat_standard, self.dict_sites, percentile_s))
            else:
                args_.append((self.data[s], self.time_resolution, mda8_s, self.dict_stat_standard, percentile_s))

        return args_

    def cal_sites(self):
        """ Site-level statistics. """

        # Context manager joins the workers (the pool was previously
        # never shut down)
        with ThreadPoolExecutor(max_workers=8) as pool:
            list_result = list(pool.map(cal_gkd_sites_by_species, *zip(*self._stat_args(with_sites=False))))

        # Species -> statistics
        self.data = dict(zip(self.species, list_result))

    def cal_cities(self):
        """ City-level statistics. """

        with ThreadPoolExecutor(max_workers=8) as pool:
            list_result = list(pool.map(cal_gkd_cities_by_species, *zip(*self._stat_args(with_sites=True))))

        self.data = dict(zip(self.species, list_result))

    def cal_regions(self):
        """ Province/region-level statistics. """

        with ThreadPoolExecutor(max_workers=8) as pool:
            list_result = list(pool.map(cal_gkd_regions_by_species, *zip(*self._stat_args(with_sites=True))))

        self.data = dict(zip(self.species, list_result))

    def run(self):

        try:

            """ read data """
            # Emit the status signal
            self.signal_progress.emit({'text': '读取数据...', 'value': None})

            # Read the raw hourly data for all species/sites
            self.data = read_gkd_by_sites(dir_h5=self.dir_h5, species=self.species, sites=self.sites, date_range=self.date_range)

            """ statistics """
            # Emit the status signal
            self.signal_progress.emit({'text': '数据统计...', 'value': None})

            if self.region_level == 'site':
                self.cal_sites()
            elif self.region_level == 'city':
                self.cal_cities()
            elif self.region_level == 'province':
                self.cal_regions()
            else:
                raise ValueError('暂不支持的导出区域级别！')

            # Emit the status signal
            self.signal_progress.emit({'text': '数据统计完成！', 'value': None})

            # Short pause so the UI can show the message
            self.msleep(500)

            # Emit the final status signal
            self.signal_progress.emit({'text': '数据统计完成！', 'value': 1000, 'source': 'ReadGkd'})

        # `except Exception` instead of the former bare `except:` so that
        # SystemExit / KeyboardInterrupt are not swallowed
        except Exception:
            # Report the traceback to the GUI
            self.signal_error.emit(traceback.format_exc())


def get_site_dict(provinces=None, cities=None, sites=None, site_type='urban', site_info=None):
    """ Organize the requested sites by province / city.

        provinces: list of province names
        cities: list of city names
        sites: list of site descriptions like '万寿西宫[1001A]'
               (exactly one of provinces/cities/sites should be given)
        site_type: 'urban', 'background' or 'both'
        site_info: SiteInfo() instance (unused when `sites` is given)

        return: (dict_result, list_sites)
                - provinces given: ({province: {city: [codes]}}, flat code list)
                - cities given:    ({city: [codes]}, flat code list)
                - sites given:     (None, [codes])

        raises ValueError when no input is given or site_type is invalid

    Single process.
    2023-08-30 v1
    """

    # No input at all -> error. (The previous check
    # `if not any(i is None for i in ...)` was inverted: it raised '无输入'
    # when ALL THREE inputs were provided and silently accepted no input.)
    if provinces is None and cities is None and sites is None:
        raise ValueError('无输入！')

    # Validate the site type
    if site_type not in ['urban', 'background', 'both']:
        raise ValueError('站点类型site_type错误：%s' % site_type)

    # Site information
    si = site_info

    # Results collected here
    dict_result = dict()

    if provinces is not None:

        # Province -> {city: site codes}
        for p in provinces:
            dict_result[p] = {c: si.dict_city2code[c] for c in si.dict_province2city[p]}

    elif cities is not None:

        # City -> site codes
        dict_result = {c: si.dict_city2code[c] for c in cities}

    elif sites is not None:

        # Regex: extract the site code between square brackets
        pattern = r'\[([^]]+)\]'

        # Site code list
        sites = [re.findall(pattern, i)[0] for i in sites]

        return None, sites

    """ filter the sites by type """
    # Flat list of all kept site codes
    list_sites = []

    for k1 in dict_result.keys():
        if isinstance(dict_result[k1], (list, np.ndarray)):
            # cities branch: value is a flat site-code list
            if site_type == 'urban':
                dict_result[k1] = [s for s in dict_result[k1] if not si.dict_code2background[s]]
            elif site_type == 'background':
                dict_result[k1] = [s for s in dict_result[k1] if si.dict_code2background[s]]
            else:
                dict_result[k1] = list(dict_result[k1])

            list_sites.extend(dict_result[k1])

        else:
            # provinces branch: value is a nested {city: site codes} dict
            for k2 in dict_result[k1].keys():
                if site_type == 'urban':
                    dict_result[k1][k2] = [s for s in dict_result[k1][k2] if not si.dict_code2background[s]]
                elif site_type == 'background':
                    dict_result[k1][k2] = [s for s in dict_result[k1][k2] if si.dict_code2background[s]]
                else:
                    dict_result[k1][k2] = list(dict_result[k1][k2])

                list_sites.extend(dict_result[k1][k2])

    # Return the data
    return dict_result, list_sites


def cal_iaqi(row: np.ndarray):
    """ Individual Air Quality Index (IAQI) of a single pollutant.
        https://www.mee.gov.cn/ywgz/fgbz/bz/bzwb/jcffbz/201203/t20120302_224166.shtml

        row: 1-d array with 5 elements: cp, bp_lo, bp_hi, iaqi_lo, iaqi_hi
             (concentration, breakpoint bounds and the IAQI bounds of that
             breakpoint interval)

        return: the pollutant's IAQI (linear interpolation between breakpoints)

    2023-07-13 v1
    Single process; kept in sync with public/_gkd.py.
    """

    # Unpack the interpolation parameters
    cp, bp_lo, bp_hi, iaqi_lo, iaqi_hi = row

    # Spans of the IAQI and breakpoint intervals
    span_iaqi = iaqi_hi - iaqi_lo
    span_bp = bp_hi - bp_lo

    # Linear interpolation of cp within [bp_lo, bp_hi] onto [iaqi_lo, iaqi_hi]
    return span_iaqi * (cp - bp_lo) / span_bp + iaqi_lo


def cal_aqi(data: pd.DataFrame):
    """ Compute the daily AQI and the primary pollutant.

        Reference: Technical Regulation on Ambient Air Quality Index (on trial),
                   HJ 633—2012,
                   https://www.mee.gov.cn/ywgz/fgbz/bz/bzwb/jcffbz/201203/t20120302_224166.shtml

        data: daily mean data with columns 'PM2.5', 'PM10', 'SO2', 'NO2',
              'CO', 'O3' (O3 as MDA8, CO in mg/m3) and a datetime index

        return: each pollutant's IAQI sub-index, the AQI and the primary
                pollutant:
                {'iaqi': pd.DataFrame,
                 'aqi': pd.Series,
                 'primary_pollutant': pd.Series,
                 'raw': pd.DataFrame,  # intermediate table of the LAST species only (debug aid)
                }

    2023-07-13 v1
    Single process; kept in sync with public/_gkd.py.
    """

    # IAQI levels and the corresponding 24h concentration breakpoints
    dict_input = {
        'IAQI': [0, 50, 100, 150, 200, 300, 400, 500],
        'SO2_24h': [0, 50, 150, 475, 800, 1600, 2100, 2620],
        # 'SO2_1h': [0, 150, 500, 650, 800],
        'NO2_24h': [0, 40, 80, 180, 280, 565, 750, 940],
        # 'NO2_1h': [0, 100, 200, 700, 1200, 2340, 3090, 3840],
        'PM10_24h': [0, 50, 150, 250, 350, 420, 500, 600],
        'CO_24h': [0, 2, 4, 14, 24, 36, 48, 60],
        # 'CO_1h': [0, 5, 10, 35, 60, 90, 120, 150],
        # 'O3_1h': [0, 160, 200, 300, 400, 800, 1000, 1200],
        # O3 8h IAQI is not defined above level 300, hence fewer breakpoints
        'O3_24h': [0, 100, 160, 215, 265, 800],
        'PM2.5_24h': [0, 35, 75, 115, 150, 250, 350, 500],
    }

    # Species list
    list_species_ = ['PM2.5', 'PM10', 'SO2', 'NO2', 'CO', 'O3']

    if not all([i in data.columns for i in list_species_]):
        raise ValueError('data中未包含如下列：%s' % ','.join([i for i in list_species_ if i not in data.columns]))

    list_series = []
    for s in list_species_:
        # Breakpoint key for this species
        key_s = s + '_24h'

        # Highest breakpoint interval (used for off-scale concentrations)
        max_interval = pd.Interval(left=dict_input[key_s][-2], right=dict_input[key_s][-1], closed='right')

        # Assign each concentration to its breakpoint interval
        # (values outside the bins come back as NaN)
        bp_s = pd.cut(x=data[s], bins=dict_input[key_s], include_lowest=True)

        # Clamp off-scale values to the highest interval
        bp_s = bp_s.fillna(max_interval)

        # Concentration data of the current species
        df_s = data.loc[:, [s]]

        # Breakpoint bounds. NOTE: this used to be np.array(map(int, [...])),
        # which builds a 0-d object array holding the *map object* itself, so
        # every BP mapped to NaN and all rows were subsequently dropped.
        df_s['BP_Lo'] = [int(i.left) for i in bp_s]
        df_s['BP_Hi'] = [int(i.right) for i in bp_s]

        # Map breakpoint bounds to their IAQI bounds
        dict_bp2iaqi = dict(zip(dict_input[key_s], dict_input['IAQI']))
        df_s['IAQI_Lo'] = df_s['BP_Lo'].map(dict_bp2iaqi)
        df_s['IAQI_Hi'] = df_s['BP_Hi'].map(dict_bp2iaqi)

        # IAQI: linear interpolation within the breakpoint interval,
        # vectorized (equivalent to applying cal_iaqi row by row)
        df_s['IAQI'] = ((df_s['IAQI_Hi'] - df_s['IAQI_Lo'])
                        * (df_s[s] - df_s['BP_Lo'])
                        / (df_s['BP_Hi'] - df_s['BP_Lo'])
                        + df_s['IAQI_Lo'])

        # Drop rows with missing data
        df_s.dropna(how='any', axis=0, inplace=True)

        # Collect this species' IAQI
        list_series.append(df_s['IAQI'])

    # Merge the IAQI of all species
    df_iaqi = pd.concat(objs=list_series, axis=1)
    df_iaqi.columns = list_species_

    # AQI = max IAQI across species, rounded to an integer
    series_aqi = df_iaqi.max(axis=1).round(decimals=0)

    # Primary pollutant (only meaningful when AQI > 50)
    series_pri_pollutant = df_iaqi.idxmax(axis='columns')
    series_pri_pollutant.name = 'primary_pollutant'

    # Mask days with AQI <= 50
    index_aqi_less_than_50 = series_aqi[series_aqi <= 50].index
    series_pri_pollutant[index_aqi_less_than_50] = 'AQI≤50'
    # series_pri_pollutant[index_aqi_less_than_50] = np.nan

    # Return the data
    return {'iaqi': df_iaqi,
            'aqi': series_aqi,
            'primary_pollutant': series_pri_pollutant,
            'raw': df_s,
            }


def cal_gkd_sites_by_species(data_: pd.DataFrame, time_resolution: str, o3_mda8: bool, dict_stat_standard: dict, percentile=None):
    """ Aggregate national monitoring-site hourly data -> site-level statistics.

        data_: hourly mean data, one column per site
        time_resolution: output resolution (important): 'YS' yearly, 'QS-MAR'
                         seasonal (Mar-May = spring), 'MS' monthly, 'D' daily,
                         'H' hourly
        o3_mda8: when True, use the MDA8 value instead of the plain daily mean
                 (intended for O3) for daily and coarser resolutions
        dict_stat_standard: data-validity thresholds, defaults like
        {
            "min_num_annual": 200,    # min days for a yearly mean/percentile (national std: 324)
            "min_num_seasonal": 60,   # min days for a seasonal mean/percentile (no national std, default 60)
            "min_num_monthly": 27,    # min days for a monthly mean/percentile (national std: 27, Feb: 25)
            "min_num_daily": 20,      # min hours for a daily mean (national std: 20)
            "min_num_8h": 10,         # min 8h rolling means for MDA8 (national std: 14)
            "min_num_moving_avg": 6,  # min hours inside each 8h rolling mean (national std: 6)
        }
        percentile: when set (e.g. percentile=90), the yearly/seasonal/monthly
                    statistic is that percentile instead of the mean

        return: pd.DataFrame

    Single process.
    2023-08-29 v1
    """

    # Hourly output needs no aggregation at all.
    if time_resolution == "H":
        return data_

    # Step 1: daily values per site (MDA8 when requested, plain daily mean otherwise).
    if o3_mda8 is True:
        df_mean = cal.cal_mda8(data=data_,
                               time_resolution="H",
                               min_num_moving_avg=dict_stat_standard["min_num_moving_avg"],
                               min_num_8h=dict_stat_standard["min_num_8h"],
                               )
    else:
        df_mean = cal.cal_daily(data=data_,
                                time_resolution="H",
                                min_num_daily=dict_stat_standard["min_num_daily"],
                                )[0]

    # Step 2: aggregate the daily values up to the requested resolution.
    # Maps resolution -> (aggregation function, name of its validity kwarg).
    dispatch = {
        "MS": (cal.cal_monthly, "min_num_monthly"),
        "QS-MAR": (cal.cal_seasonal, "min_num_seasonal"),
        "YS": (cal.cal_yearly, "min_num_annual"),
    }

    if time_resolution in dispatch:
        agg_func, kwarg_name = dispatch[time_resolution]
        kwargs = {"data": df_mean,
                  "time_resolution": "D",
                  kwarg_name: dict_stat_standard[kwarg_name]}

        if percentile:
            # Percentile statistic is the last element of the returned tuple.
            df_mean = agg_func(percentile=percentile, **kwargs)[-1]
        else:
            # Plain mean is the first element of the returned tuple.
            df_mean = agg_func(**kwargs)[0]

    # 'D' (and anything unrecognized) falls through with the daily values.
    return df_mean


def _mean_sites_to_cities(df: pd.DataFrame, dict_city: dict) -> pd.DataFrame:
    """ Average site columns into one column per city.

        df: data whose columns are site codes
        dict_city: {'北京市': ['1001A', '1003A', ...], ...}

        Cities with none of their sites present in df are skipped entirely.
        Like the previous inline version, raises ValueError (from pd.concat)
        when no city has any matching site.

        return: pd.DataFrame with one column per city
    """

    list_series_city = []
    for city, sites in dict_city.items():
        # Only the sites actually present in the data
        list_sites_of_city = [site for site in sites if site in df.columns]

        if not list_sites_of_city:
            continue

        # dropna() returns a fresh frame, so the caller's data is never
        # touched (the previous version called dropna(inplace=True) on a
        # .loc slice, which risks SettingWithCopy surprises).
        data_to_mean = df.loc[:, list_sites_of_city].dropna(how='all', axis=0)

        # nanmean ignores sites that are missing on a given timestamp
        series_city = pd.Series(data=np.nanmean(data_to_mean.to_numpy(), axis=1),
                                index=data_to_mean.index,
                                name=city)

        list_series_city.append(series_city)

    return pd.concat(objs=list_series_city, axis=1)


def cal_gkd_cities_by_species(data_: pd.DataFrame, time_resolution: str, o3_mda8: bool, dict_stat_standard: dict, dict_city: dict, percentile=None):
    """ Aggregate national monitoring-site data -> city-level statistics.

        data_: hourly mean data, one column per site
        time_resolution: output resolution (important): 'YS' yearly, 'QS-MAR'
                         seasonal (Mar-May = spring), 'MS' monthly, 'D' daily,
                         'H' hourly
        o3_mda8: when True, use the MDA8 value instead of the plain daily mean
                 (intended for O3) for daily and coarser resolutions
        dict_stat_standard: data-validity thresholds, defaults like
        {
            "min_num_annual": 200,    # min days for a yearly mean/percentile (national std: 324)
            "min_num_seasonal": 60,   # min days for a seasonal mean/percentile (no national std, default 60)
            "min_num_monthly": 27,    # min days for a monthly mean/percentile (national std: 27, Feb: 25)
            "min_num_daily": 20,      # min hours for a daily mean (national std: 20)
            "min_num_8h": 10,         # min 8h rolling means for MDA8 (national std: 14)
            "min_num_moving_avg": 6,  # min hours inside each 8h rolling mean (national std: 6)
        }
        dict_city: {'北京市': ['1001A', '1003A', ...], '保定市': ['1051A', '1052A', ...], ...}
        percentile: when set (e.g. percentile=90), the city statistic is that
                    percentile instead of the mean

        return: pd.DataFrame, one column per city

    Single process.
    2023-08-30 v1
    """

    # Hourly output: only spatial averaging, no temporal aggregation.
    # (The per-city averaging was previously duplicated inline for both the
    # 'H' branch and the daily branch; now extracted to _mean_sites_to_cities.)
    if time_resolution == 'H':
        return _mean_sites_to_cities(data_, dict_city)

    # Other resolutions: first compute daily values per site
    # (MDA8 when requested, plain daily mean otherwise).
    if o3_mda8 is True:
        df_mean = cal.cal_mda8(
            data=data_,
            time_resolution="H",
            min_num_moving_avg=dict_stat_standard["min_num_moving_avg"],
            min_num_8h=dict_stat_standard["min_num_8h"],
        )
    else:
        df_mean = cal.cal_daily(
            data=data_,
            time_resolution="H",
            min_num_daily=dict_stat_standard["min_num_daily"],
        )[0]

    # Site-level daily values -> city-level daily values
    df_mean = _mean_sites_to_cities(df_mean, dict_city)

    # Aggregate the daily city values up to the requested resolution.
    if time_resolution == "MS":

        if percentile:
            df_mean = cal.cal_monthly(
                data=df_mean,
                time_resolution="D",
                percentile=percentile,
                min_num_monthly=dict_stat_standard["min_num_monthly"],
            )[-1]
        else:
            df_mean = cal.cal_monthly(
                data=df_mean,
                time_resolution="D",
                min_num_monthly=dict_stat_standard["min_num_monthly"],
            )[0]

    elif time_resolution == "QS-MAR":

        if percentile:
            df_mean = cal.cal_seasonal(
                data=df_mean,
                time_resolution="D",
                percentile=percentile,
                min_num_seasonal=dict_stat_standard["min_num_seasonal"],
            )[-1]
        else:
            df_mean = cal.cal_seasonal(
                data=df_mean,
                time_resolution="D",
                min_num_seasonal=dict_stat_standard["min_num_seasonal"],
            )[0]

    elif time_resolution == "YS":

        if percentile:
            df_mean = cal.cal_yearly(
                data=df_mean,
                time_resolution="D",
                percentile=percentile,
                min_num_annual=dict_stat_standard["min_num_annual"],
            )[-1]
        else:
            df_mean = cal.cal_yearly(
                data=df_mean,
                time_resolution="D",
                min_num_annual=dict_stat_standard["min_num_annual"],
            )[0]

    else:
        # 'D' falls through with the daily city values.
        pass

    return df_mean


def cal_gkd_regions_by_species(data_: pd.DataFrame, time_resolution: str, o3_mda8: bool, dict_stat_standard: dict, dict_region: dict, percentile=None):
    """ Aggregate national monitoring-site data -> region-level statistics
        (the exact regional standard still needs verification).

        A regional value is derived from the region's city values:
            regional daily value:   mean over each city's daily value
            regional monthly value: mean over each city's monthly value/percentile
            regional seasonal value: mean over each city's seasonal value/percentile
            regional yearly value:  mean over each city's yearly value/percentile

        data_: hourly mean data, one column per site
        time_resolution: output resolution (important): 'YS' yearly, 'QS-MAR'
                         seasonal (Mar-May = spring), 'MS' monthly, 'D' daily
        o3_mda8: when True, use the MDA8 value instead of the plain daily mean
                 (intended for O3) for daily and coarser resolutions
        dict_stat_standard: data-validity thresholds, defaults like
        {
            "min_num_annual": 200,    # min days for a yearly mean/percentile (national std: 324)
            "min_num_seasonal": 60,   # min days for a seasonal mean/percentile (no national std, default 60)
            "min_num_monthly": 27,    # min days for a monthly mean/percentile (national std: 27, Feb: 25)
            "min_num_daily": 20,      # min hours for a daily mean (national std: 20)
            "min_num_8h": 10,         # min 8h rolling means for MDA8 (national std: 14)
            "min_num_moving_avg": 6,  # min hours inside each 8h rolling mean (national std: 6)
        }
        dict_region: {'河北省': {'保定市': ['1051A', ...], '石家庄市': ['1029A', ...], ...},
                      '河南省': {...}, ...}
        percentile: when set (e.g. percentile=90), the statistic is that
                    percentile instead of the mean

        return: pd.DataFrame, one column per region

    Single process.
    2023-09-03 v1
    """

    list_series_region = []
    for region, cities_of_region in dict_region.items():
        # All site codes of this region, restricted to those present in the data
        sites_in_region = [s for s in chain.from_iterable(cities_of_region.values())
                           if s in data_.columns]

        # City-level statistics computed on this region's data only
        df_cities = cal_gkd_cities_by_species(data_=data_.loc[:, sites_in_region],
                                              time_resolution=time_resolution,
                                              o3_mda8=o3_mda8,
                                              dict_stat_standard=dict_stat_standard,
                                              dict_city=cities_of_region,
                                              percentile=percentile,
                                              )

        # Drop timestamps where every city is missing
        df_cities.dropna(how='all', axis=0, inplace=True)

        # Regional value = unweighted mean over the region's cities
        series_region = pd.Series(data=np.nanmean(df_cities.to_numpy(), axis=1),
                                  index=df_cities.index,
                                  name=region)

        list_series_region.append(series_region)

    # One column per region
    return pd.concat(objs=list_series_region, axis=1)


# Translation table mapping characters that are illegal in file names to
# visually similar full-width / box-drawing substitutes.
# chars v1 used '✱' for '*'; v2 uses '×'.
_FILENAME_CHAR_TABLE = str.maketrans({'\\': '╲', '/': '╱', ':': '︰', '*': '×',
                                      '?': '？', '"': '〞', '<': '＜', '>': '＞', '|': '｜'})


def filename_correct(old_filename):
    """ Replace characters that violate file-naming rules.

        old_filename: the candidate file name
        return: str with every illegal character substituted

    2021-06-30 v1
    2023-08-14 v2
    Kept in sync with _rw.py.
    """

    # str.translate performs all substitutions in a single C-level pass,
    # instead of one .replace() call per character.
    return old_filename.translate(_FILENAME_CHAR_TABLE)


def dict2xlsx(data: dict, path_xlsx: str):
    """ Save several sheets of data into one xlsx file.

        data: dict, one key per sheet name; value is the pd.DataFrame to save
        path_xlsx: output path of the xlsx file

    No return value.
    2023-08-14 v1, kept in sync with public\_io.py
    Single process.
    """

    wb = Workbook()
    for sheet_name, df in data.items():
        # reset_index() returns a copy, so the caller's DataFrame is no longer
        # modified (the previous version mutated it via reset_index(inplace=True)).
        df_out = df.reset_index()

        # NaN -> None so empty cells are written instead of 'nan'
        # (same replace call as before, just not in place)
        df_out = df_out.replace(np.nan, None)

        # First row is the header, the rest are the data rows; this replaces
        # the old transpose/insert/transpose dance with the same result.
        rows = [df_out.columns.tolist()] + df_out.values.tolist()

        # Sheet names must not contain characters illegal in file names
        wb.new_sheet(sheet_name=filename_correct(sheet_name), data=rows)

    wb.save(path_xlsx)


def format_list2export(sites_: list, site_info=None):
    """ Attach detailed site information to each site code.

        sites_: list of site codes, e.g. ['1001A', '1002A', '1003A', ...]
        site_info: SiteInfo instance; created on demand when None
        return: list of strings formatted as
                site name [site code] {lat,lon} [optional background marker]

    2023-09-07
    """

    if site_info is None:
        # BUG FIX: this previously assigned SiteInfo().dict_code2coordinate
        # (a plain dict), which lacks the dict_code2name / dict_code2background
        # attributes accessed below and would raise AttributeError.
        site_info = SiteInfo()

    # Compose the display string: name [code] {lat,lon} {background marker}
    # 'background' sites are reference sites (对照点)
    dict_background = {True: '对照点', False: ''}

    list_site = [site_info.dict_code2name[i]
                 + ' [' + i + ']'
                 + ' {' + ','.join([str(round_accurately(k, 3)) for k in site_info.dict_code2coordinate[i]]) + '} '
                 + dict_background[site_info.dict_code2background[i]]
                 for i in sites_]

    return list_site


# Month of a QS-MAR quarter start -> season label
_SEASON_BY_MONTH = {3: 'Sp.', 6: 'Su.', 9: 'Au.', 12: 'Wi.'}


def format_dt(dt: pd.Timestamp, time_resolution):
    """ Format a single timestamp according to the output time resolution.

        dt: one timestamp (the old pd.DatetimeIndex annotation was wrong:
            callers invoke this element-wise, and dt.month is compared to a
            scalar, which would fail for a whole index)
        time_resolution: 'MS', 'QS-MAR' or 'YS'

        return: str, e.g. '2023-05', '2023-Sp.', '2023'
        raises: ValueError for an unsupported time_resolution, or — under
                'QS-MAR' — for a month that is not a quarter start (the old
                version silently returned None in that case)
    """

    if time_resolution == 'MS':
        return dt.strftime('%Y-%m')

    if time_resolution == 'QS-MAR':
        try:
            return dt.strftime('%Y-') + _SEASON_BY_MONTH[dt.month]
        except KeyError:
            raise ValueError('month %d is not a QS-MAR quarter start' % dt.month) from None

    if time_resolution == 'YS':
        return dt.strftime('%Y')

    raise ValueError('不支持的time_resolution: %s' % time_resolution)


def aqi2pollution_level(data_aqi: pd.DataFrame, time_resolution: str):
    """ Classify AQI values into pollution levels and aggregate their shares.

        AQI level bins = [0, 50, 100, 150, 200, 300, 500]
        level names    = ['优', '良', '轻度污染', '中度污染', '重度污染', '严重污染']
        level colors   = ['#66c430', '#e9da2e', '#f57217', '#ee1c25', '#66247b', '#8a2327']

        data_aqi: daily AQI values; index is a DatetimeIndex, columns are city names
        time_resolution: aggregation resolution, one of 'MS', 'QS-MAR', 'YS'

        return: (dict2plot, dict2save) — both keyed by city name, each value a
                DataFrame with periods as rows and pollution levels as columns
                (values are percentages). dict2save carries human-readable
                period labels suitable for export.

    2023-09-08 v1
    """

    # Classify every AQI value into its pollution level
    df_levels = data_aqi.apply(
        lambda col: pd.cut(col, bins=cfg.bins, include_lowest=True, labels=cfg.list_pollution_level))

    # For each period: percentage of days at each level, per city
    # (key: period timestamp; value: df with cities as rows, levels as columns)
    dict_data = dict()
    for dt, df_dt in df_levels.groupby(pd.Grouper(freq=time_resolution)):
        pct = df_dt.apply(lambda col: col.value_counts(normalize=True) * 100).T.round(2)
        pct.index.name = '城市'
        dict_data[dt] = pct

    # Period labels in order
    list_dt = list(dict_data.keys())

    # Re-pivot: one DataFrame per city, periods as rows, levels as columns
    dict2plot = dict()
    for city in data_aqi.columns.tolist():
        df_city = pd.concat(objs=[dict_data[dt].loc[[city], :] for dt in list_dt], axis=0)
        df_city.index = list_dt

        # Fixed column order matching the configured level list
        df_city = df_city.reindex(columns=cfg.list_pollution_level)
        df_city.index.name = 'Date'

        dict2plot[city] = df_city

    # Same data with string period labels, for saving
    dict2save = dict()
    for city, df_plot in dict2plot.items():
        df_save = df_plot.copy()
        df_save.index = [format_dt(dt=dt, time_resolution=time_resolution) for dt in df_save.index]
        df_save.index.name = 'Date'
        dict2save[city] = df_save

    return dict2plot, dict2save


# def stat_count(data: pd.Series | pd.DataFrame):
#     """ 统计数据中各值出现的次数

#         return: dict, {'':, '': , ...} key为data中的column, value为字典: data中每个值的个数

#     2023-09-10
#     """

#     print(data)


def cal_primary_pollutant(data: dict, time_resolution: str):
    """ Determine the primary pollutant per city and period.

        data: key is a species name — must contain 'CO', 'SO2', 'NO2', 'O3',
              'PM2.5', 'PM10'; value is a DataFrame with a daily DateTimeIndex
              and city-name columns

        time_resolution: aggregation resolution, one of 'MS', 'QS-MAR', 'YS'

        return: (pd.DataFrame, dict) — index is the period, columns follow
                ['O3', 'SO2', 'NO2', 'CO', 'PM10', 'PM2.5'] (order matters)
                pd.DataFrame: statistics over all cities combined
                dict: the same statistics per city

        raises: ValueError when a required species is missing

    2023-09-08
    """

    # Required species (order matters downstream)
    list_species = ['O3', 'SO2', 'NO2', 'CO', 'PM10', 'PM2.5']

    # Species missing from the input
    list_species_miss = [i for i in list_species if i not in data.keys()]

    # BUG FIX: the previous check was `if not list_species:` which is always
    # False (list_species is a non-empty constant), so missing species were
    # never reported and a confusing KeyError surfaced later instead.
    if list_species_miss:
        raise ValueError('data must contain species: ' + ', '.join(list_species_miss))

    # City list
    list_city_all = data['CO'].columns.tolist()

    # Per-city primary-pollutant series, merged later
    list_series_all = []

    # Cities that actually have data (the ones exported)
    list_city_export = []

    for city in list_city_all:

        # One column per species for the current city
        df_city = pd.concat(objs=[data[s].loc[:, city] for s in list_species], axis=1)
        df_city.columns = list_species

        # Drop days where every species is missing
        df_city.dropna(how='all', axis=0, inplace=True)

        if df_city.empty:
            continue

        # Daily primary pollutant (days with AQI<=50 are labelled 'AQI≤50')
        series_primary_pollutant = cal_aqi(data=df_city)['primary_pollutant']

        list_series_all.append(series_primary_pollutant)
        list_city_export.append(city)

    # Merge the per-city series
    df_primary_pollutant = pd.concat(objs=list_series_all, axis=1)
    df_primary_pollutant.columns = list_city_export

    # Group by the output period
    group_dt = df_primary_pollutant.resample(time_resolution)

    # Period labels
    list_dt = [dt for dt, _ in group_dt]

    # Per period: count of each pollutant, per city
    list_df_city = [df_dt.apply(lambda col: col.value_counts()) for _, df_dt in group_dt]

    # Per-city statistics, row-normalized to percentages
    dict_data_city = dict()
    for city in list_city_export:

        # One row per period, one column per pollutant label
        df_count_city = pd.concat(objs=[df.loc[:, city] for df in list_df_city], axis=1).T
        df_count_city.index = list_dt
        df_count_city.index.name = 'Date'

        # Normalize each row to percent
        df_count_city = (df_count_city.T / df_count_city.sum(axis=1)).T * 100

        dict_data_city[city] = df_count_city

    # All-city statistics: counts pooled over every city, then normalized
    df_count = pd.DataFrame(data=[dict(Counter(df_dt.to_numpy().flatten())) for _, df_dt in group_dt],
                            index=list_dt,
                            )
    df_count = (df_count.T / df_count.sum(axis=1)).T * 100

    # Return: all-city statistics, per-city statistics
    return df_count, dict_data_city


class AQI2PollutionLevel(QThread):
    """ Worker thread: classify AQI values into pollution levels and plot them.

        data_: daily AQI data; DatetimeIndex rows, city columns
        path_html: output path of the plot file
        time_resolution: 'MS', 'QS-MAR' or 'YS'

    No return value; results are delivered through signals.
    2023-09-11
    """

    # Signal: progress {'text': ..., 'value': ...}
    signal_progress = pyqtSignal(dict)

    # Signal: error message (formatted traceback)
    signal_error = pyqtSignal(str)

    def __init__(self, data_: dict, path_html: str, time_resolution: str):
        super().__init__()

        self.data = data_
        self.time_resolution = time_resolution
        self.path_html = path_html

    def run(self):
        """ Classify, plot, and emit progress/results via signals. """

        try:

            self.signal_progress.emit({'text': '根据AQI计算污染级别...', 'value': None})

            # Classify the AQI data into pollution levels
            dict2plot, dict2save = aqi2pollution_level(data_aqi=self.data, time_resolution=self.time_resolution)

            self.signal_progress.emit({'text': '作图中...', 'value': None})
            # (removed a leftover debug print of dict2plot here)

            # One plot layout per resolution
            if self.time_resolution == 'YS':
                plot_html.aqi_bar_by_city_yearly(data_=dict2plot, path_html=self.path_html)
            elif self.time_resolution == 'QS-MAR':
                plot_html.aqi_bar_by_city_seasonal_bokeh(data_=dict2plot, path_html=self.path_html)
            elif self.time_resolution == 'MS':
                plot_html.aqi_bar_by_city_monthly_bokeh(data_=dict2plot, path_html=self.path_html)

            self.signal_progress.emit({'text': '完成', 'value': 1000, 'data': dict2save})

        except Exception:
            # BUG FIX: previously a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; report the traceback to the GUI.
            self.signal_error.emit(traceback.format_exc())


class CalPrimaryPolltant(QThread):
    """ Worker thread: compute the primary pollutant per city and plot it.

        data_: dict, species name -> pd.DataFrame (daily values, city columns)
        path_html: output path of the plot file
        time_resolution: 'MS', 'QS-MAR' or 'YS'

    No return value; results are delivered through signals.
    2023-09-11
    """

    # Signal: progress {'text': ..., 'value': ...}
    signal_progress = pyqtSignal(dict)

    # Signal: error message (formatted traceback)
    signal_error = pyqtSignal(str)

    def __init__(self, data_: dict, path_html: str, time_resolution='MS'):
        super().__init__()

        self.data = data_
        self.time_resolution = time_resolution
        self.path_html = path_html

    def run(self):
        """ Compute, plot, and emit progress/results via signals. """

        try:

            self.signal_progress.emit({'text': '计算首要污染物...', 'value': None})

            # Per-city primary-pollutant statistics
            _, dict_data_city = cal_primary_pollutant(data=self.data, time_resolution=self.time_resolution)

            self.signal_progress.emit({'text': '作图中...', 'value': None})

            plot_html.plot_primary_pollutant_by_city_bokeh(data_=dict_data_city, path_html=self.path_html)

            self.signal_progress.emit({'text': '完成', 'value': 1000, 'data': dict_data_city})

        except Exception:
            # BUG FIX: previously a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; report the traceback to the GUI.
            self.signal_error.emit(traceback.format_exc())


class ExceedStandard(QThread):
    """ Worker thread: count days exceeding PM2.5/O3 standards and plot them.

        data_: dict of daily DataFrames keyed by species ('PM2.5', 'O3');
               DatetimeIndex rows, city columns
        path_html: output path of the plot file
        species: 'PM2.5', 'O3' or 'PM2.5+O3'
        filename_xlsx: suggested xlsx file name, passed through with the result

    No return value; results are delivered through signals.
    2023-09-14
    """

    # Signal: progress {'text': ..., 'value': ...}
    signal_progress = pyqtSignal(dict)

    # Signal: error message (formatted traceback)
    signal_error = pyqtSignal(str)

    def __init__(self, data_: dict, path_html: str, species: str, filename_xlsx: str):
        super().__init__()

        self.data = data_
        self.path_html = path_html
        self.species = species
        self.filename_xlsx = filename_xlsx

        # Plot data, one DataFrame per city
        self.dict_data = dict()

    def _count_exceed_days(self, df: pd.DataFrame, threshold_i, threshold_ii, columns: list):
        """ Monthly counts of days >= grade-II limit and of days within
            [grade-I, grade-II); stores one DataFrame per city in self.dict_data.

            This replaces the previously duplicated bodies of pm2p5() and o3().

            df: daily values, city columns
            threshold_i / threshold_ii: grade-I / grade-II concentration limits
            columns: output labels [grade-II label, grade-I-only label]
        """

        group_monthly = df.resample('MS')

        # Months where a city has no data at all -> counts must be NaN, not 0
        is_all_nan = group_monthly.apply(lambda x: x.isna().all())

        # Days at/above each limit, per month
        df_i = group_monthly.apply(lambda x: (x >= threshold_i).sum())
        df_ii = group_monthly.apply(lambda x: (x >= threshold_ii).sum())

        df_i[is_all_nan] = np.nan
        df_ii[is_all_nan] = np.nan

        for city in df.columns.tolist():

            # col 0: days >= grade II; col 1: days >= grade I
            df_city = pd.concat([df_ii.loc[:, city], df_i.loc[:, city]], axis=1)

            # col 1 becomes: days >= grade I but < grade II
            df_city.iloc[:, 1] = df_city.iloc[:, 1] - df_city.iloc[:, 0]

            df_city.columns = columns

            # Drop months without any data
            df_city.dropna(how='all', axis=0, inplace=True)

            self.dict_data[city] = df_city

    def pm2p5(self):
        """ Monthly PM2.5 exceedance counts (grade I: 35, grade II: 75 μg/m³). """

        self._count_exceed_days(df=self.data['PM2.5'],
                                threshold_i=35,
                                threshold_ii=75,
                                columns=['PM₂.₅ Daily Mean ≥ 75 μg m⁻³',
                                         '35 μg m⁻³ ≤ PM₂.₅ Daily Mean < 75 μg m⁻³'])

    def o3(self):
        """ Monthly O3 exceedance counts (grade I: 100, grade II: 160 μg/m³). """

        self._count_exceed_days(df=self.data['O3'],
                                threshold_i=100,
                                threshold_ii=160,
                                columns=['O₃-MDA8 ≥ 160 μg m⁻³',
                                         '100 μg m⁻³ ≤ O₃-MDA8 < 160 μg m⁻³'])

    def pm2p5_o3(self):
        """ Monthly counts of days where PM2.5 and O3 exceed simultaneously. """

        df_pm2p5 = self.data['PM2.5']
        df_o3 = self.data['O3']

        # Daily exceedance flags per standard
        df_pm2p5_i = df_pm2p5.apply(lambda x: x >= 35)
        df_pm2p5_ii = df_pm2p5.apply(lambda x: x >= 75)
        df_o3_i = df_o3.apply(lambda x: x >= 100)
        df_o3_ii = df_o3.apply(lambda x: x >= 160)

        # Months where a city has no PM2.5 data at all -> counts must be NaN
        is_all_nan = df_pm2p5.resample('MS').apply(lambda x: x.isna().all())

        for city in df_pm2p5.columns.tolist():

            # Both species above grade I / grade II on the same day
            df_city_i = pd.concat(objs=[df_pm2p5_i.loc[:, city], df_o3_i.loc[:, city]], axis=1)
            df_city_ii = pd.concat(objs=[df_pm2p5_ii.loc[:, city], df_o3_ii.loc[:, city]], axis=1)

            # Monthly count of simultaneous exceedance days
            series_city_i = df_city_i.all(axis=1).resample('MS').apply(lambda x: (x == True).sum())
            series_city_ii = df_city_ii.all(axis=1).resample('MS').apply(lambda x: (x == True).sum())

            df_city = pd.concat(objs=[series_city_i, series_city_ii], axis=1)

            df_city.columns = ['PM₂.₅ Daily Mean ≥ 35 μg m⁻³ & O₃-MDA8 ≥ 100 μg m⁻³',
                               'PM₂.₅ Daily Mean ≥ 75 μg m⁻³ & O₃-MDA8 ≥ 160 μg m⁻³']

            # Months without any data -> NaN
            df_city[is_all_nan] = np.nan

            # Drop months without any data
            df_city.dropna(how='all', axis=0, inplace=True)

            self.dict_data[city] = df_city

    def run(self):
        """ Dispatch on species, plot, and emit progress/results via signals. """

        try:

            self.signal_progress.emit({'text': '计算首要污染物...', 'value': None})

            if self.species == 'PM2.5':
                self.pm2p5()
            elif self.species == 'O3':
                self.o3()
            elif self.species == 'PM2.5+O3':
                self.pm2p5_o3()

            self.signal_progress.emit({'text': '作图中...', 'value': None})

            plot_html.bar_num_of_days_over_standard_days_every_monthly_bokeh(data_=self.dict_data, path_html=self.path_html)

            self.signal_progress.emit({'text': '完成', 'value': 1000, 'data': self.dict_data, 'filename_xlsx': self.filename_xlsx})

        except Exception:
            # BUG FIX: previously a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; report the traceback to the GUI.
            self.signal_error.emit(traceback.format_exc())


class PlotTS(QThread):
    """ Worker thread: plot time-series data.

        data_: dict, species name -> pd.DataFrame
        path_html: output path of the plot file
        time_resolution: time resolution (currently unused; kept for
                         interface compatibility)
        by: group the plots by 'species' or by 'regions'

    No return value; results are delivered through signals.
    2023-09-11
    """

    # Signal: progress {'text': ..., 'value': ...}
    signal_progress = pyqtSignal(dict)

    # Signal: error message (formatted traceback)
    signal_error = pyqtSignal(str)

    def __init__(self, data_: dict, path_html: str, time_resolution: str, by: str):
        super().__init__()

        self.data = data_
        self.path_html = path_html
        self.by = by

    def run(self):
        """ Reshape (when grouping by region), plot, and emit progress. """

        try:

            # When grouping by region, pivot species-keyed frames into
            # region-keyed frames with one column per species.
            if self.by == 'regions':

                # Scale CO so it shares a value range with the other species
                if 'CO' in self.data.keys():
                    self.data['CO×100'] = self.data['CO'] * 100
                    del self.data['CO']

                # Species list
                list_species = list(self.data.keys())

                # Province/city/site names
                list_regions = self.data[list_species[0]].columns.tolist()

                dict_data = dict()
                for region in list_regions:
                    # One column per species for this region
                    df_region = pd.concat(objs=[self.data[s].loc[:, region] for s in list_species], axis=1)
                    df_region.columns = list_species
                    dict_data[region] = df_region

                self.data = dict_data

            self.signal_progress.emit({'text': '作图中...', 'value': None})

            plot_html.ts_bokeh(data_=self.data, path_html=self.path_html)

            self.signal_progress.emit({'text': '完成', 'value': 1000})

        except Exception:
            # BUG FIX: previously a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; report the traceback to the GUI.
            self.signal_error.emit(traceback.format_exc())


class PlotBar(QThread):
    """ Worker thread that renders an annual-mean bar chart as HTML.

        data_: dict, key is species name, value is a pd.DataFrame whose
               columns are province/city/site identifiers
        path_html: output path for the generated HTML file

    No return value.
    2023-09-13
    """

    # Signal: progress status {'text': , 'value': }
    signal_progress = pyqtSignal(dict)

    # Signal: error traceback string
    signal_error = pyqtSignal(str)

    def __init__(self, data_: dict, path_html: str):
        super().__init__()

        self.data = data_
        self.path_html = path_html

    def run(self):
        """ Reshape per-species data into per-region frames, then plot. """

        try:
            # Species list
            list_species = list(self.data.keys())

            # Province/city/site identifiers, taken from the first species'
            # columns; all species are assumed to share them.
            list_regions = self.data[list_species[0]].columns.tolist()

            # Re-key the data by region: each value becomes a DataFrame whose
            # columns are the species.
            dict_data = dict()
            for region in list_regions:

                # Merge one column per species for this region
                df_region = pd.concat(objs=[self.data[s].loc[:, region] for s in list_species], axis=1)

                # Column headers are the species names
                df_region.columns = list_species

                dict_data[region] = df_region

            self.data = dict_data

            # Progress update: plotting in progress
            self.signal_progress.emit({'text': '作图中...', 'value': None})

            plot_html.bar_year_bokeh(data_=self.data, path_html=self.path_html)

            # Progress update: done
            self.signal_progress.emit({'text': '完成', 'value': 1000})

        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed; report the traceback to the GUI thread.
            self.signal_error.emit(traceback.format_exc())


if __name__ == '__main__':
    # Ad-hoc driver / scratch area. The commented-out lines are a record of
    # earlier processing runs (CSV->HDF5 conversion, multi-site reads) and are
    # intentionally kept.
    # get_datetime_range()

    """ CSV to HDF5 """
    # A Qt application object must exist before QThread workers can run.
    app = QCoreApplication([])
    # db = Csv2Hdf(dir_csv=r'E:\bigdata\gkd\china_sites_csv', 
    #              dir_h5=r'E:\bigdata\gkd\china_sites_hdf5_')
    # db.start()
    # db.wait()
    # app.quit()

    """ 进程池读取数据 """
    # Read station metadata (SiteInfo is defined earlier in this file).
    # t0 = time.time()
    si = SiteInfo()

    provinces = ['河北省']

    # Flatten the per-city lists of station codes into one flat list covering
    # every city in si.list_city_china.
    list_sites = list(chain(*[si.dict_city2code[i] for i in si.list_city_china]))
    print(len(list_sites))

    # list_species = ['PM2.5', 'NO2', 'O3', 'CO', 'SO2', 'PM10', 'AQI']

    # dict_result = ReadGkd(dir_h5=r'E:\bigdata\gkd\china_sites_hdf5_',
    #                       speices=list_species,
    #                       sites=list_sites,
    #                       date_range=['2015-01-01', '2022-12-31'],
    #                       )

    # dict_result.start()
    # dict_result.wait()
    # app.quit()
    # t1 = time.time()
    # print(t1-t0)

    """ 站点整理 """
    # d, sites_ = get_site_dict(provinces=provinces, site_type='urban', site_info=si)
    # # b = cal_gkd()

    # # pprint(d)
    # # pprint(sites_)

    # # Read stations
    # s = read_gkd_by_sites(dir_h5=r'E:\bigdata\gkd\china_sites_hdf5_',
    # species=['PM2.5', 'NO2'],
    # sites=sites_,
    # date_range=['2015-01-01', '2022-12-31'],
    # )
    # # print(s)

    # dd = cal_gkd(s)

    from _rw import performance

    """ 全站点优化 """
    # Time a full-network NO2 read over 2015-2022 (timing result currently unused).
    t0 = time.time()
    df_sites = read_gkd_by_sites(dir_h5=r'E:\bigdata\gkd\china_sites_hdf5_',
                                 species=['NO2'], sites=list_sites, date_range=['2015-01-01', '2022-12-31'],
                                 )['NO2']
    t1 = time.time()
    # print(df_sites)

    # Minimum sample-count thresholds used by the statistics routines; the
    # national-standard values are noted per key.
    a = {
        "min_num_annual": 200,  # minimum days for an annual mean/percentile; national standard: 324
        "min_num_seasonal": 60,  # minimum days for a seasonal mean/percentile; no national standard, default: 60
        "min_num_monthly": 27,  # minimum days for a monthly mean/percentile; national standard: 27 (25 in February)
        "min_num_daily": 20,  # minimum hourly records for a daily mean; national standard: 20
        "min_num_8h": 10,  # minimum 8-h moving averages for an MDA8 value; national standard: 14
        "min_num_moving_avg": 6,  # minimum hourly records per 8-h moving average; national standard: 6
    }

    # Compute daily means
    # performance(cal.cal_daily, df_sites, 'H', 20, False)

    # df_sites_mean = cal_gkd_sites_by_species(data_=df_sites, time_resolution='D', 
    #  o3_mda8=False, dict_stat_standard=a, percentile=None)

    # NOTE(review): rebinds list_sites from site codes to whatever
    # get_site_dict returns for urban sites — verify this shadowing is intended.
    dict_city, list_sites = get_site_dict(cities=si.list_city_china, site_type='urban', site_info=si)

    # performance(cal.cal_daily, df_sites, 'H', a['min_num_daily'], False)
    # Benchmark the per-city daily-aggregation routine.
    performance(cal_gkd_cities_by_species, df_sites, 'D', False, a, dict_city, None)