#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import time, sys, datetime, logging, os
import pandas as pd
import numpy as np
from shutil import copyfile
from sys import exit

import db_utils as util

from app.models.T_DWD_FACT_ZZSC_LZZB_J001 import FeatureInfo, Summary, STSEntry, ExitPdoDataEntry, FactDetail, Range
from app.LoggableObject import LoggableObject
from app.utils.XLogger import XLogger
from app.utils.XUtils import XUtils


class RetryableQuery(LoggableObject):
    """Run a SQL query against a connection, retrying on failure.

    Attempts the query up to ``max_times`` times; each failed attempt is
    logged and retried.  Returns the resulting DataFrame, or ``None`` when
    every attempt failed (or the query itself returned ``None``).
    """
    # class-level defaults kept for backward compatibility; shadowed per
    # instance in __init__
    db_conn = None
    sql = None
    max_times = -1

    def __init__(self, p_db_conn=None, p_sql=None, p_max_times=1):
        """
        :param p_db_conn: open DB connection the query runs on.
        :param p_sql: SQL text to execute.
        :param p_max_times: maximum number of attempts (>= 1).
        """
        super(RetryableQuery, self).__init__()
        self.db_conn = p_db_conn
        self.sql = p_sql
        self.max_times = p_max_times
        self.__times = -1  # number of attempts made so far, minus one

    def redo(self):
        """Execute the query with retries; return the DataFrame or None.

        Rewritten from the original recursive retry (recursion depth grew
        with max_times) into an equivalent iterative loop.
        """
        df = None
        while True:
            self.__times += 1
            if self.__times >= self.max_times:
                break  # attempts exhausted; give up and return None
            try:
                df = util.query(conn=self.db_conn, sql=self.sql)
                break  # success
            except Exception as e:
                self.logger.error(str(e))
        return df


class RetryableSave(LoggableObject):
    """Append a DataFrame to a database table, retrying on failure.

    Attempts ``DataFrame.to_sql(..., if_exists='append')`` up to
    ``max_times`` times; each failed attempt is logged and retried.
    """
    # class-level defaults kept for backward compatibility; shadowed per
    # instance in __init__
    db_conn = None
    dataframe = None
    max_times = -1

    def __init__(self, p_db_conn=None, p_dataframe=None, p_max_times=1,
                 p_table_name='T_DWD_FACT_ZZSC_LZZB_0001_0112',
                 p_schema='BGTAMAL1'):
        """
        :param p_db_conn: open DB connection (SQLAlchemy-compatible for
            ``to_sql``).
        :param p_dataframe: the pandas DataFrame to append.
        :param p_max_times: maximum number of attempts (>= 1).
        :param p_table_name: target table name (generalized from the
            previously hard-coded value; default preserves old behavior).
        :param p_schema: target schema name (same backward-compatible
            default as before).
        """
        super(RetryableSave, self).__init__()
        self.db_conn = p_db_conn
        self.dataframe = p_dataframe
        self.max_times = p_max_times
        self.table_name = p_table_name
        self.schema = p_schema
        self.__times = -1  # number of attempts made so far, minus one

    def redo(self):
        """Append the DataFrame with retries.

        Rewritten from the original recursive retry (recursion depth grew
        with max_times) into an equivalent iterative loop.
        """
        while True:
            self.__times += 1
            if self.__times >= self.max_times:
                break  # attempts exhausted; give up silently (as before)
            try:
                self.dataframe.to_sql(name=self.table_name.lower(),
                                      con=self.db_conn,
                                      schema=self.schema.lower(),
                                      index=False,
                                      if_exists='append')
                break  # success
            except Exception as e:
                self.logger.error(str(e))


class ColdRollingJob3(LoggableObject):
    """Cold-rolling quality-indicator job (job #3).

    Renders per-source-table SQL from '0112SQL_template.txt', runs it
    against STS, enriches the result frame and appends it to the MPP
    target table.  Entry point is execute().
    """
    # job configuration object exposing the DB_* connection settings
    config = None

    # per-run caches, reset in __init__ and again at the end of execute().
    # NOTE(review): these class-level dicts are shared across instances
    # until shadowed in __init__ -- confirm no code relies on class access.
    max_end_time_cache = dict()
    query_sts_cache = dict()
    related_info_cache = dict()
    low_upper_limit_cache = dict()

    def __init__(self, p_config=None):
        """Store the configuration and reset the per-run caches.

        :param p_config: configuration object with DB host/port/user
            settings for the MPP, STS and MES connections.
        """
        super(ColdRollingJob3, self).__init__()
        self.config = p_config

        self.max_end_time_cache = dict()
        self.query_sts_cache = dict()
        self.related_info_cache = dict()
        self.low_upper_limit_cache = dict()
    def execute(self, max_end_time_1=None, max_end_time_2=None):
        """Run the job once, log the elapsed time, then clear the caches.

        :param max_end_time_1: optional lower time bound passed through to
            __do_execute.
        :param max_end_time_2: optional upper time bound passed through to
            __do_execute.
        """
        self.logger.info('ColdRollingJob3.execute')
        start = datetime.datetime.now()

        self.__do_execute(max_end_time_1=max_end_time_1, max_end_time_2=max_end_time_2)

        # total_seconds() keeps the fractional part and stays correct for
        # runs longer than a day (.seconds alone wraps at 24 hours and
        # ignores the .days component).
        elapsed = (datetime.datetime.now() - start).total_seconds()
        self.logger.info("Time Used 4 All ----->>>> %f seconds" % (elapsed))

        # NOTE clear caches so the next job run starts fresh
        self.max_end_time_cache = dict()
        self.query_sts_cache = dict()
        self.related_info_cache = dict()
        self.low_upper_limit_cache = dict()

    def __do_execute(self, max_end_time_1=None, max_end_time_2=None):
        """Core body of one job run.

        Opens MPP/STS/MES connections, loads the static feature metadata,
        then for each source table (grouped by ``tbl_eng_name``) renders a
        SQL file from '0112SQL_template.txt', executes it against STS,
        enriches the result frame (shift/turn/date and audit columns) and
        appends it to the MPP target table via RetryableSave.

        :param max_end_time_1: lower time bound.  NOTE(review): overwritten
            below by __do_query_max_end_time_from_mpp_by() -- confirm the
            parameter is intentionally ignored.
        :param max_end_time_2: upper time bound; same caveat as above.
        """
        # open the three database connections used throughout this run
        db_conn_mpp = util.getConnectionDb2(self.config.DB_HOST_MPP_DB2_BGBDPROD,
                                            self.config.DB_PORT_MPP_DB2_BGBDPROD,
                                            self.config.DB_DBNAME_MPP_DB2_BGBDPROD,
                                            self.config.DB_USER_MPP_DB2_BGBDPROD,
                                            self.config.DB_PASSWORD_MPP_DB2_BGBDPROD)
        self.logger.info('connect db_conn_mpp success?????????????????')

        db_conn_sts = util.getConnectionSTS(self.config.DB_HOST_STS,
                                            self.config.DB_PORT_STS,
                                            self.config.DB_DBNAME_STS,
                                            self.config.DB_USER_STS,
                                            self.config.DB_PASSWORD_STS)
        self.logger.info('connect db_conn_sts success?????????????????')

        db_conn_mes = util.getConnectionDb2(self.config.DB_HOST_MES_DB2_BG5AM0,
                                            self.config.DB_PORT_MES_DB2_BG5AM0,
                                            self.config.DB_DBNAME_MES_DB2_BG5AM0,
                                            self.config.DB_USER_MES_DB2_BG5AM0,
                                            self.config.DB_PASSWORD_MES_DB2_BG5AM0)
        self.logger.info('connect db_conn_mes success?????????????????')

        # db_conn_blj = util.getConnectionDb2(self.config.DB_HOST_BLJ,
        #                                     self.config.DB_PORT_BLJ,
        #                                     self.config.DB_DBNAME_BLJ,
        #                                     self.config.DB_USER_BLJ,
        #                                     self.config.DB_PASSWORD_BLJ)
        # self.logger.info('connect db_conn_blj success?????????????????')
        # sql = "select TABLE_VARID as FIELD_ID,UNIT_NUM from GXZL.SA_GX_J002"
        # dataframe_SA_GX_J002 = RetryableQuery(p_db_conn=db_conn_blj, p_sql=sql, p_max_times=5).redo()

        # shift/turn lookup table, merged on the 'from' time boundary below
        sql = "select * from BGTAMAL1.BASE_SU_J003"
        dataframe_BASE_SU_J003 = RetryableQuery(p_db_conn=db_conn_mpp, p_sql=sql, p_max_times=5).redo()

        # NOTE ********************** step1
        """
        # NOTE ********************** step1
        MPP查静态表
        sql = 'select * from BGTAMAL1.T_DWD_FACT_ZZSC_LZZB_J001 WHERE 1=1 order by item_id'
        """
        # NOTE fetch all features (machine A may carry features 1..6, machine
        # B features 2..8, another machine 5..9, and so on)
        # statistics_type
        # and statistics_type=2 and item_id='C608_0012'
        sql = "select * from BGTAMAL1.T_ADS_FACT_LZZB_J001 WHERE 1=1 order by item_id"
        self.logger.info('df_mpp_db2 sql=%s' % sql)
        df = RetryableQuery(p_db_conn=db_conn_mpp, p_sql=sql, p_max_times=5).redo()
        # field_id, unit_num
        # df = pd.merge(df, dataframe_SA_GX_J002, on=['field_id'], how='left')

        # NOTE(review): this discards the method's parameters -- confirm
        max_end_time_1, max_end_time_2, maxtime3, maxtime4 = self.__do_query_max_end_time_from_mpp_by()

        source = '0112SQL_template.txt'
        # NOTE will be split into 32 groups (one per source table)
        groupby = df.groupby("tbl_eng_name")
        group_index = 0
        for name, group in groupby:
            group_index += 1
            self.logger.info('\ngroup_index=%d, group_name=%s start************************' % (group_index, name))

            # copy the template; each placeholder below is substituted in place
            target_file_name = 'NEW_0112SQL_%s_TEMP_%d.txt' % (name, group.shape[0])
            try:
                copyfile(source, target_file_name)
            except Exception as e:
                self.logger.error(str(e))

            # NOTE-ABC0
            matched_str = '{PLACE_HOLDER_ABC0}'
            sql = self.__create_ABC0(group=group, max_end_time_1=max_end_time_1, max_end_time_2=max_end_time_2)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            # NOTE-ABC1
            matched_str = '{PLACE_HOLDER_ABC1}'
            sql = self.__create_ABC1(group=group, maxtime3=maxtime3, maxtime4=maxtime4)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC2}'
            sql = self.__create_ABC2(group=group, maxtime3=maxtime3, maxtime4=maxtime4)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC3}'
            sql = self.__create_ABC3(group=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC4}'
            sql = self.__create_ABC4(p_dataframe=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC5}'
            sql = self.__create_ABC5(p_dataframe=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC6}'
            sql = self.__create_ABC6(p_dataframe=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC7}'
            sql = self.__create_ABC7(p_dataframe=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC8}'
            sql = self.__create_ABC8(p_dataframe=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC9}'
            sql = self.__create_ABC9(p_dataframe=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC10}'
            sql = self.__create_ABC10(p_dataframe=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC11}'
            sql = self.__create_ABC11(p_dataframe=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC12}'
            sql = self.__create_ABC12(p_dataframe=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            matched_str = '{PLACE_HOLDER_ABC13}'
            sql = self.__create_ABC13(p_dataframe=group)
            XUtils.replaceregexp(p_target_file_name=target_file_name, p_matched_string=matched_str, p_new_string=sql)

            # what remains is to attach the shift/turn related info
            f = open(target_file_name)  # NOTE(review): handle is never closed -- consider a with-block
            sql = f.read()
            d = RetryableQuery(p_db_conn=db_conn_sts, p_sql=sql, p_max_times=5).redo()

            # d['prod_turn'].fillna(value='i-am-none', inplace=True)
            # d = d.drop(d[(d['prod_turn'] == 'i-am-none') | (d['prod_turn'] == 'i-am-none')].index)

            if d.empty:
                continue

            d.rename(columns={'prod_date': 'end_time'}, inplace=True)

            def __cal_end_time2(x):
                # NOTE nearest 08:00 or 20:00 boundary at or before end_time
                t = datetime.datetime.strptime(str(x.end_time), '%Y%m%d%H%M%S')
                prev_day = t - datetime.timedelta(days=1)

                today_20 = datetime.datetime(year=t.year, month=t.month, day=t.day, hour=20).strftime('%Y%m%d%H%M%S')
                today_8 = datetime.datetime(year=t.year, month=t.month, day=t.day, hour=8).strftime('%Y%m%d%H%M%S')
                yestoday_20 = datetime.datetime(year=prev_day.year, month=prev_day.month, day=prev_day.day,
                                                hour=20).strftime('%Y%m%d%H%M%S')
                # NOTE(review): lexicographic comparison -- assumes x.end_time
                # is a 'YYYYMMDDHHMMSS' string; confirm the upstream dtype
                if x.end_time >= today_20:
                    rst = today_20
                elif x.end_time >= today_8:
                    rst = today_8
                else:
                    rst = yestoday_20
                return rst

            d['from'] = d.apply(lambda x: __cal_end_time2(x), axis=1)
            # NOTE force the BASE_SU_J003 column names to lower case
            dataframe_BASE_SU_J003.columns = dataframe_BASE_SU_J003.columns.str.lower()
            d = pd.merge(d, dataframe_BASE_SU_J003, on=['from'], how='left')

            def __cal_prod_turn(x):
                # turn column comes from the BASE_SU_J003 merge above
                rst = int(x.turn)
                return rst

            d['prod_turn'] = d.apply(lambda x: __cal_prod_turn(x), axis=1)

            def __cal_prod_month(x):
                # 'YYYYMM' derived from the merged date column
                t = datetime.datetime.strptime(str(x.date), '%Y%m%d')
                return t.strftime('%Y%m')

            d['PROD_MONTH'] = d.apply(lambda x: __cal_prod_month(x), axis=1)

            def __cal_prod_date2(x):
                # NOTE first eight digits (YYYYMMDD) of the production end time
                t = datetime.datetime.strptime(str(x.date), '%Y%m%d')
                return t.strftime('%Y%m%d')

            d['prod_date'] = d.apply(lambda x: __cal_prod_date2(x), axis=1)

            # audit columns below use a '----' placeholder for creator/revisor
            def __cal_REC_CREATOR(x):
                t = '----'
                return t
            d['REC_CREATOR'] = d.apply(lambda x: __cal_REC_CREATOR(x), axis=1)

            def __cal_REC_CREATE_TIME(x):
                t = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
                return t
            d['REC_CREATE_TIME'] = d.apply(lambda x: __cal_REC_CREATE_TIME(x), axis=1)

            def __cal_REC_CREATE_DATE(x):
                t = datetime.datetime.now().strftime('%Y%m%d')
                return t
            d['REC_CREATE_DATE'] = d.apply(lambda x: __cal_REC_CREATE_DATE(x), axis=1)

            def __cal_REC_REVISOR(x):
                t = '----'
                return t
            d['REC_REVISOR'] = d.apply(lambda x: __cal_REC_REVISOR(x), axis=1)

            def __cal_REC_REVISOR_TIME(x):
                t = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
                return t
            d['REC_REVISOR_TIME'] = d.apply(lambda x: __cal_REC_REVISOR_TIME(x), axis=1)

            # NOTE(review): d is renamed and saved once per feature row of the
            # group -- verify the repeated appends are intended
            for index, row in group.iterrows():
                feature_info = FeatureInfo(p_dic=row)
                d.columns = d.columns.str.upper()
                d.rename(
                    columns={'ITEM_CHN_NAME_%s' % (feature_info.feature_code): 'ITEM_CHN_NAME',
                             'ITEM_ID_%s' % (feature_info.feature_code): 'ITEM_ID',
                             'QUALIFIED_RATE_%s' % (feature_info.feature_code): 'QUALIFIED_RATE',
                             'QUALIFIED_JUDGE_%s' % (feature_info.feature_code): 'QUALIFIED_JUDGE',
                             'SUM_X_%s' % (feature_info.feature_code): 'QUALIFIED_QTY',
                             'SUM_Y': 'TOTAL_QTY',
                             'TOTAL_QTY_%s' % (feature_info.feature_code): 'TOTAL_QTY',
                             'ST_NO': 'TAPPING_MARK'},
                    inplace=True)

                self.logger.info('df_properties-')
                df_properties = d.columns.values
                self.logger.info(",".join(df_properties))

                # the exact column set of the MPP target table
                table_properties = ['ITEM_ID', 'ITEM_CHN_NAME', 'END_TIME', 'FACTORY_DESC', 'UNIT_CODE', 'PROD_COILNO',
                                    'ENTRY_COILNO', 'SECTION_ID', 'SECTION_NAME', 'EXIT_WT', 'TAPPING_MARK',
                                    'QUALIFIED_RATE',
                                    'QUALIFIED_QTY', 'TOTAL_QTY', 'QUALIFIED_JUDGE', 'PROD_DATE', 'PROD_MONTH',
                                    'PROD_TURN',
                                    'REC_CREATOR', 'REC_CREATE_TIME', 'REC_CREATE_DATE', 'REC_REVISOR',
                                    'REC_REVISOR_TIME']

                self.logger.info('diff-1')
                diff1 = list(
                    set(df_properties).difference(set(table_properties)))  # columns in the frame but not in the target table
                self.logger.info(",".join(diff1))

                self.logger.info('diff-2')
                diff2 = list(
                    set(table_properties).difference(set(df_properties)))  # columns in the target table but missing from the frame
                self.logger.info(",".join(diff2))

                # drop the surplus columns before writing
                for p in diff1:
                    d.drop(p, axis=1, inplace=True)
                #
                RetryableSave(p_db_conn=db_conn_mpp, p_dataframe=d, p_max_times=5).redo()

            self.logger.info('group_index=%d, group_name=%s completed************************\n' % (group_index, name))


        # return
        #
        # name = '??????????????????????????'
        #
        # self.logger.info('------------------------------start dataframe.rows_len=%d' % (df.shape[0]))
        # for index, row in df.iterrows():
        #     self.logger.info('\n\n\n################################################start index=%d' % index)
        #     feature_info = FeatureInfo(p_dic=row)
        #     self.__do_process_data(p_dataframe_BASE_SU_J003=dataframe_BASE_SU_J003,
        #                            p_tbl_eng_name=name,
        #                            p_dataframe=None,
        #                            p_index=index,
        #                            p_conn_mpp=db_conn_mpp,
        #                            p_conn_sts=db_conn_sts,
        #                            p_conn_mes=db_conn_mes,
        #                            p_feature_info=feature_info,
        #                            max_end_time_1=None,
        #                            max_end_time_2=None)
        #     self.logger.info('################################################end index=%d\n\n\n' % index)
        # self.logger.info('------------------------------end')

        # close
        util.closeConnection(db_conn_mpp)
        util.closeConnection(db_conn_sts)
        util.closeConnection(db_conn_mes)

    def __create_ABC0(self, group, max_end_time_1, max_end_time_2):
        """Build the {PLACE_HOLDER_ABC0} fragment.

        Selects the entry coil number plus one graded field value per unique
        feature in the group, bounded by the [max_end_time_1, max_end_time_2]
        window.  Table/time metadata comes from the group's last row.
        """
        select_exprs = list()
        for _, record in group.iterrows():
            info = FeatureInfo(p_dic=record)
            expr = "%s*%s as field_value_%s" % (info.field_eng_name, info.qty_grade, info.feature_code)
            if expr not in select_exprs:
                select_exprs.append(expr)
        # 'info' deliberately keeps the last row's metadata after the loop
        sql = " select %s as entry_coilno, %s from %s.%s where 1=1 and %s>='%s' and %s<='%s' " % (
            info.entry_coilno_field,
            ','.join(select_exprs),
            info.tbl_schema,
            info.tbl_eng_name,
            info.time_field,
            max_end_time_1,
            info.time_field,
            max_end_time_2)
        return sql

    def __create_ABC1(self, group, maxtime3, maxtime4):
        """Build the {PLACE_HOLDER_ABC1} fragment.

        Produces the per-unit coil-production select; only the group's first
        row is consulted.  Returns the string 'None' for an empty group or an
        unrecognized unit code.  maxtime3/maxtime4 are currently unused but
        kept for signature compatibility.
        """
        sql = 'None'
        for _, record in group.iterrows():
            info = FeatureInfo(p_dic=record)
            table_name = 'BGTASO%s.%s_0000' % (info.unit_code, info.unit_code)
            # the unit codes are mutually exclusive, so an if/elif chain is
            # equivalent to the original independent ifs
            if info.unit_code in ['C608', 'C708']:
                sql = ("select "
                       "entry_coil_no as entry_coilno, "
                       "delivery_coil_no as prod_coilno, "
                       "process_end_time as prod_date, "
                       "crew as prod_turn,"
                       "shift as shift, "
                       "deliver_weight_act as exit_wt "
                       "from %s " % (table_name))
            elif info.unit_code == 'C502':
                sql = ("select "
                       "entcoil as entry_coilno, "
                       "outcoil as prod_coilno, "
                       "end_time as prod_date, "
                       "turn as prod_turn,"
                       "shift as shift, "
                       "weight as exit_wt "
                       "from %s " % (table_name))
            elif info.unit_code == 'C512':
                sql = ("select "
                       "entcoil as entry_coilno, "
                       "outcoil as prod_coilno, "
                       "endtime as prod_date, "
                       "crew as prod_turn,"
                       "shift as shift, "
                       "out_weight as exit_wt "
                       "from %s " % (table_name))
            break
        return sql

    def __create_ABC2(self, group, maxtime3, maxtime4):
        """Build the {PLACE_HOLDER_ABC2} fragment.

        Selects the tapping mark (st_no), the lower/upper limit columns
        (cp_item_n_001 / cp_item_n_003, with -9999 upper limits mapped to
        9999), feature code and entry coil number for the first row's unit,
        from the bgtasolzl3 limit table matching that unit.
        """
        sql = 'None'
        # one limit table per unit family
        limit_tables = {'C608': 'bgtasolzl3.TCPCR5A03',
                        'C708': 'bgtasolzl3.TCPCR5A03',
                        'C502': 'bgtasolzl3.TCPCR5A01',
                        'C512': 'bgtasolzl3.TCPCR5A02'}
        for _, record in group.iterrows():
            info = FeatureInfo(p_dic=record)
            table_name = limit_tables.get(info.unit_code, '')
            # mat_no holds the ENTRY coil number, not the exit coil number
            sql = (" select "
                   " st_no, "
                   " cp_item_n_001,"
                   " (CASE when cp_item_n_003=-9999 then 9999 else cp_item_n_003 end) as cp_item_n_003, "
                   " unit_code, "
                   " key_prc_char_code as feature_code, "
                   " mat_no as entry_coilno "
                   " from "
                   " %s "
                   " where "
                   " 1=1 "
                   " and unit_code='%s' "
                   % (table_name,
                      info.unit_code))
            break
        return sql

    def __create_ABC3(self, group):
        """Build the {PLACE_HOLDER_ABC3} fragment: pivot the limit rows into
        one cp_item_n_001_<code> / cp_item_n_003_<code> pair per feature."""
        template = "sum(case when a.feature_code = '%s' then a.%s else 0 end )as %s_%s"
        fragments = list()
        for _, record in group.iterrows():
            info = FeatureInfo(p_dic=record)
            for column in ('cp_item_n_001', 'cp_item_n_003'):
                fragments.append(template % (info.feature_code, column, column, info.feature_code))
        return ','.join(fragments)

    def __create_ABC4(self, p_dataframe=None):
        """Build the {PLACE_HOLDER_ABC4} fragment.

        For each feature code X emits:
            t2.cp_item_n_001_X,
            t2.cp_item_n_003_X,
            (case when field_value_X>=cp_item_n_001_X and
             field_value_X<=cp_item_n_003_X then 1 else 0 end) as OK_X
        i.e. the per-feature limits plus a 0/1 in-spec flag.
        """
        fragments = list()
        for _, record in p_dataframe.iterrows():
            code = FeatureInfo(p_dic=record).feature_code
            fragments.append("t2.cp_item_n_001_%s" % (code))
            fragments.append("t2.cp_item_n_003_%s" % (code))
            fragments.append(
                "(case when field_value_%s>=cp_item_n_001_%s and field_value_%s<=cp_item_n_003_%s then 1 else 0 end) as OK_%s" % (
                    code, code, code, code, code))
        return ','.join(fragments)

    def __create_ABC5(self, p_dataframe=None):
        """Build the {PLACE_HOLDER_ABC5} fragment: per-feature in-spec coil
        count, e.g. ``sum(t3.OK_X) as sum_x_X`` for each feature code X."""
        codes = [FeatureInfo(p_dic=record).feature_code
                 for _, record in p_dataframe.iterrows()]
        return ','.join("sum(t3.OK_%s) as sum_x_%s" % (c, c) for c in codes)

    def __create_ABC6(self, p_dataframe=None):
        """Build the {PLACE_HOLDER_ABC6} fragment: per-feature qualified rate
        as a percentage, e.g. ``t4.sum_x_X/t4.sum_y*100 as qualified_rate_X``."""
        codes = [FeatureInfo(p_dic=record).feature_code
                 for _, record in p_dataframe.iterrows()]
        return ','.join("t4.sum_x_%s/t4.sum_y*100 as qualified_rate_%s" % (c, c) for c in codes)

    def __create_ABC7(self, p_dataframe=None):
        """Build the {PLACE_HOLDER_ABC7} fragment: re-select each feature's
        qualified rate and in-spec count from the t5 subquery, e.g.
        ``t5.qualified_rate_X`` and ``t5.sum_x_X`` per feature code X."""
        fragments = list()
        for _, record in p_dataframe.iterrows():
            code = FeatureInfo(p_dic=record).feature_code
            fragments.extend(["t5.qualified_rate_%s" % (code),
                              "t5.sum_x_%s" % (code)])
        return ','.join(fragments)


    def __create_ABC8(self, p_dataframe=None):
        """Build the {PLACE_HOLDER_ABC8} fragment: map each feature's
        qualified rate to a 1/2/3 grade (>=90 -> 1, 70..90 -> 2, else 3),
        emitted as ``qualified_judge_<code>``."""
        template = ("(case when t7.qualified_rate_%s>=90 and t7.qualified_rate_%s<=100 then 1"
                    " when t7.qualified_rate_%s>=70 and t7.qualified_rate_%s<90 then 2 else 3 end) as qualified_judge_%s")
        codes = [FeatureInfo(p_dic=record).feature_code
                 for _, record in p_dataframe.iterrows()]
        return ','.join(template % ((c,) * 5) for c in codes)

    def __create_ABC9(self, p_dataframe=None):
        """Build the {PLACE_HOLDER_ABC9} fragment: inline each feature's item
        id and Chinese display name as string literals, e.g.
        ``'C502_0004' as item_id_X, '...' as item_chn_name_X``."""
        fragments = list()
        for _, record in p_dataframe.iterrows():
            info = FeatureInfo(p_dic=record)
            fragments.append("'%s' as item_id_%s" % (info.item_id, info.feature_code))
            fragments.append("'%s' as item_chn_name_%s" % (info.item_chn_name, info.feature_code))
        return ','.join(fragments)


    def __create_ABC10(self, p_dataframe=None):
        """Return the section name from the group's first row.

        Returns the literal string 'None' when the frame is empty.

        NOTE: this method was previously defined twice with identical bodies
        (copy-paste duplicate); the redundant second definition has been
        removed -- behavior is unchanged because the duplicate merely
        rebound the same name to the same implementation.
        """
        sql = 'None'
        for index, row in p_dataframe.iterrows():
            t = FeatureInfo(p_dic=row)
            sql = t.section_name
            break
        return sql

    def __create_ABC11(self, p_dataframe=None):
        """Return the section id from the group's first row, or the literal
        string 'None' when the frame is empty."""
        for _, record in p_dataframe.iterrows():
            return FeatureInfo(p_dic=record).section_id
        return 'None'

    def __create_ABC12(self, p_dataframe=None):
        """Return the unit code from the group's first row, or the literal
        string 'None' when the frame is empty."""
        for _, record in p_dataframe.iterrows():
            return FeatureInfo(p_dic=record).unit_code
        return 'None'


    def __create_ABC13(self, p_dataframe=None):
        """Return the factory description from the group's first row, or the
        literal string 'None' when the frame is empty."""
        for _, record in p_dataframe.iterrows():
            return FeatureInfo(p_dic=record).factory_desc
        return 'None'


    def __do_process_data(self,
                          p_dataframe_BASE_SU_J003=None,
                          p_tbl_eng_name=None,
                          p_dataframe=None,
                          p_index=0,
                          p_conn_mpp=None,
                          p_conn_sts=None,
                          p_conn_mes=None,
                          p_feature_info=None,
                          max_end_time_1=None,
                          max_end_time_2=None):
        """
        其中统计类型statistics_type有两种情况，1代表低频数据统计，2代表高频数据统计
        :return:
        """
        # NOTE step--1.1
        start = datetime.datetime.now()
        max_end_time_1, max_end_time_2, maxtime3, maxtime4 = self.__do_query_max_end_time_from_mpp_by()
        elapsed = float((datetime.datetime.now() - start).seconds)

        # NOTE step-- dataframe_1 = ???????  查STS， 根据什么条件查STS
        # NOTE********************** step2   总共65行静态特性数据，那么就对应65个dataframe-1
        # 建立空的dataframe1
        # 对step1取的数据进行for循环  一条一条去STS查询
        # sql = " select %s as entry_coilno, %s as field_value from %s.%s where 1=1 and %s>='%s' and %s<='%s' " % (
        #     p_feature_info.entry_coilno_field,
        #     p_feature_info.field_eng_name,
        #     p_feature_info.tbl_schema,
        #     p_feature_info.tbl_eng_name,
        #     p_feature_info.time_field,
        #     p_max_end_time_1,
        #     p_feature_info.time_field,
        #     p_max_end_time_2)
        # 得到65个dataframe1，
        # dataframe1创立时要补全从静态表读到的信息FACTORY_DESC,UNIT_CODE,ITEM_ID,ITEM_CHN_NAME,SECTION_ID,SECTION_NAME,FEATURE_CODE,STATISTICS_TYPE
        sts_dict, dataframe_1 = self.__do_query_sts_db_by(p_conn_sts=p_conn_sts,
                                                          p_tbl_eng_name=p_tbl_eng_name,
                                                          p_dataframe=p_dataframe,
                                                          p_feature_info=p_feature_info,
                                                          p_max_end_time_1=max_end_time_1,
                                                          p_max_end_time_2=max_end_time_2)

        # NOTE dataframe_2 = 补全信息
        # NOTE********************** step3
        # # 建立空的dataframe2
        # # 分别去STS里查询静态表
        # 总共有65个dataframe2
        sts_base_list, dataframe_2 = self.__query_related_info_4_merge(p_conn_sts=p_conn_sts,
                                                                       p_feature_info=p_feature_info,
                                                                       maxtime3=maxtime3,
                                                                       maxtime4=maxtime4)
        # NOTE 当prod_coilno_field没有数据时 dataframe2的select要进行修改  增加一个去重，并且出口卷号出口重量的位置直接写死成空
        if p_feature_info.prod_coilno_field is None:
            pass

        # NOTE dataframe_3 = ???????
        # NOTE********************** step4
        # NOTE 将dataframe1和dataframe2  合并在一起 得到dataframe3也是65个 on  entry_coilno，unit_code
        # self.logger.info('merge dataframe_1+dataframe_2-->dataframe_3 before_drop--len=%d' % dataframe_3.shape[0])
        # # NOTE 如果df3中的出口卷号是空， 就丢弃掉该条记录
        # dataframe_3['prod_coilno'].fillna(value='i-am-none', inplace=True)
        # dataframe_3 = dataframe_3.drop(
        #     dataframe_3[(dataframe_3['prod_coilno'] == 'i-am-none')].index)
        # self.logger.info('merge dataframe_1+dataframe_2-->dataframe_3 after_drop--len=%d' % dataframe_3.shape[0])

        # NOTE********************** step5
        # 建立空的dataframe4
        #
        # 查询完也有65个dataframe4
        # 去mes  取上下限
        consider, ok, r, dataframe_4 = self.__do_query_low_upper_limit_from_mes_by_4_merge(
            p_conn_mes=p_conn_mes,
            p_feature_info=p_feature_info,
            maxtime3=maxtime3,
            maxtime4=maxtime4)
        self.logger.info('dataframe_4 done ')

        # NOTE********************** step6
        #
        # 将dataframe3和dataframe4  合并得到dataframe5   on feature_code ，entry_coilno ，unit_code
        # dataframe_5 = pd.merge(dataframe_3, dataframe_4, on=['feature_code', 'entry_coilno', 'unit_code'], how='left')
        # 将dataframe1所有特性的实际值与dataframe4先按照原有逻辑进行merge得到dataframe5
        dataframe_5 = pd.merge(dataframe_1, dataframe_4, on=['feature_code', 'entry_coilno', 'unit_code'], how='left')
        self.logger.info('merge dataframe_3+dataframe_4-->dataframe_5 before_drop--len=%d' % dataframe_5.shape[0])
        # df[f1].fillna(value=pd.np.nan, inplace=True)
        dataframe_5['cp_item_n_001'].fillna(value='i-am-none', inplace=True)
        dataframe_5['cp_item_n_003'].fillna(value='i-am-none', inplace=True)
        # NOTE 然后3和4merge成5之后  如果上下限有一个是空值也直接丢掉
        dataframe_5 = dataframe_5.drop(
            dataframe_5[
                (dataframe_5['cp_item_n_001'] == 'i-am-none') | (dataframe_5['cp_item_n_003'] == 'i-am-none')].index)
        self.logger.info('merge dataframe_3+dataframe_4-->dataframe_5 after_drop--len=%d' % dataframe_5.shape[0])

        # 加一个没数据能跳过
        if dataframe_5.shape[0] <= 0:
            return

        # NOTE 现在就时要去判定field_value，和cp_item_n_001，cp_item_n_003的关系
        # NOTE 如果在其中就是ok = 1 不在就时ok = 0
        # 下限 lower limit  cp_item_n_001
        # 上限 upper limit cp_item_n_003
        def __cal_ok(x):
            ok = -1
            if x.field_value >= x.cp_item_n_001 and x.field_value <= x.cp_item_n_003:
                ok = 1
            else:
                ok = 0
            return ok

        dataframe_5['ok'] = dataframe_5.apply(lambda x: __cal_ok(x), axis=1)
        self.logger.info('dataframe_5 done ')

        # NOTE************** step7
        # 得到65个dataframe5后
        # 对每一个都进行一次groupby求和，按照prod_coilno，item_id，对刚刚计算的OK那列进行求和，得到每个卷这个特征的ok总点数QUALIFIED_QTY，和总点数TOTAL_QTY
        # 然后得到每个出口卷号一条数据  相当于将高频数据合并了
        # 然后增加一列QUALIFIED_RATE=QUALIFIED_QTY/TOTAL_QTY*100
        # NOTE 强行加一列 tmp_qty
        dataframe_5['tmp_qty'] = 1
        if p_feature_info.prod_coilno_field is None:
            # NOTE 当prod_coilno_field没有数据时, groupby求和判断（注意此时不用出口卷号，要用入口卷号groupby）
            a = dataframe_5.groupby(['entry_coilno', 'item_id'])['ok'].agg([np.sum]).round(2)
            b = dataframe_5.groupby(['entry_coilno', 'item_id'])['tmp_qty'].agg([np.sum]).round(2)
            dataframe_5.drop_duplicates(subset=['entry_coilno', 'item_id'], keep='first', inplace=True)
        else:
            # NOTE 当prod_coilno_field有数据时 按照原有逻辑groupby求和判断完之后
            a = dataframe_5.groupby(['prod_coilno', 'item_id'])['ok'].agg([np.sum]).round(2)
            b = dataframe_5.groupby(['prod_coilno', 'item_id'])['tmp_qty'].agg([np.sum]).round(2)
            dataframe_5.drop_duplicates(subset=['prod_coilno', 'item_id'], keep='first', inplace=True)

        # NOTE dataframe_5['qualified_rate'] = (a/b)*100 ？？？？？？？？？？？
        if p_feature_info.prod_coilno_field is None:
            c = pd.merge(a, b, on=['entry_coilno', 'item_id'], how='left')
            d = pd.merge(c, dataframe_5, on=['entry_coilno', 'item_id'], how='left')
        else:
            c = pd.merge(a, b, on=['prod_coilno', 'item_id'], how='left')
            d = pd.merge(c, dataframe_5, on=['prod_coilno', 'item_id'], how='left')

        # dataframe_5['qualified_rate'] = 88 #dataframe_5.apply(lambda x: __cal_ok(x), axis=1)
        def __cal_qualified_rate(x):
            rst = int(100 * (x.sum_x / x.sum_y))
            return rst

        d['QUALIFIED_RATE'] = d.apply(lambda x: __cal_qualified_rate(x), axis=1)

        # # QUALIFIED_RATE<=50的数据
        # tmp_df = d.loc[d['QUALIFIED_RATE'] <= 50]
        # writer = pd.ExcelWriter('output__%d.xlsx' % p_index)
        # tmp_df.to_excel(writer)
        # writer.save()

        def __cal_QUALIFIED_JUDGE(x):
            okk = -1
            if x.QUALIFIED_RATE in range(0, 75):
                okk = 3
            if x.QUALIFIED_RATE in range(75, 90):
                okk = 2
            if x.QUALIFIED_RATE in range(90, 100 + 1):
                okk = 1
            return okk

        d['QUALIFIED_JUDGE'] = d.apply(lambda x: __cal_QUALIFIED_JUDGE(x), axis=1)

        # NOTE 按照原有逻辑groupby求和判断完之后
        # NOTE 再将此时的dataframe与dataframe2按照原有逻辑进行merge
        # NOTE Merge补全需要写库的字段后直接写入到数据库
        # NOTE 再将此时的dataframe与dataframe2进行merge（注意此时不用入口卷号，要用出口卷号）
        if p_feature_info.prod_coilno_field is None:
            # NOTE 再将此时的dataframe与新的dataframe2进行merge（按照原来的逻辑，用入口卷号去merge）
            d = pd.merge(d, dataframe_2, on=['entry_coilno', 'unit_code'], how='left')
        else:
            # NOTE 当prod_coilno_field有数据时 再将此时的dataframe与dataframe2进行merge（注意此时不用入口卷号，要用出口卷号）
            d = pd.merge(d, dataframe_2, on=['entry_coilno', 'prod_coilno', 'unit_code'], how='left')

        d['prod_turn'].fillna(value='i-am-none', inplace=True)
        d = d.drop(d[(d['prod_turn'] == 'i-am-none') | (d['prod_turn'] == 'i-am-none')].index)

        # NOTE 当prod_coilno_field没有数据时，写库的时候  出口卷号  出口重量这两个字段直接写成空
        # if p_feature_info.prod_coilno_field is None:
        #     d['prod_coilno'] = d['prod_coilno'].apply(lambda x: None)
        #     d['exit_wt'] = d['exit_wt'].apply(lambda x: None)

        if d.empty:
            return

        # NOTE prod_turn,prod_month,prod_date， 这几个好像要放到和dataframe2  merge完才有
        # NOTE 先重命名一下
        # d.rename(columns={'prod_turn': 'prod_turn_original'}, inplace=True)
        #
        # def __cal_prod_turn2(x):
        #     rst = {'A': '1', 'B': '2', 'C': '3', 'D': '4'}[x.prod_turn_original]
        #     return rst
        #
        # # NOTE PROD_TURN只要1234  不要abcd
        # d['PROD_TURN2'] = d.apply(lambda x: __cal_prod_turn2(x), axis=1)
        #
        # # NOTE 先复制一列
        # def __cal_prod_date_original(x):
        #     return x.prod_date
        #
        # d['prod_date_original'] = d.apply(lambda x: __cal_prod_date_original(x), axis=1)
        # d.drop('prod_date', axis=1, inplace=True)
        #
        # def __cal_prod_date2(x):
        #     # NOTE 生产结束时间的前八位
        #     t = datetime.datetime.strptime(str(x.prod_date_original), '%Y%m%d%H%M%S')
        #     return t.strftime('%Y%m%d')
        #
        # d['prod_date2'] = d.apply(lambda x: __cal_prod_date2(x), axis=1)
        #
        # def __cal_prod_month(x):
        #     t = datetime.datetime.strptime(str(x.prod_date_original), '%Y%m%d%H%M%S')
        #     return t.strftime('%Y%m')
        #
        # d['PROD_MONTH'] = d.apply(lambda x: __cal_prod_month(x), axis=1)

        d.rename(columns={'prod_date': 'end_time'}, inplace=True)

        def __cal_end_time2(x):
            # NOTE 距离最近的08:00或20:00
            t = datetime.datetime.strptime(str(x.end_time), '%Y%m%d%H%M%S')
            prev_day = t - datetime.timedelta(days=1)

            today_20 = datetime.datetime(year=t.year, month=t.month, day=t.day, hour=20).strftime('%Y%m%d%H%M%S')
            today_8 = datetime.datetime(year=t.year, month=t.month, day=t.day, hour=8).strftime('%Y%m%d%H%M%S')
            yestoday_20 = datetime.datetime(year=prev_day.year, month=prev_day.month, day=prev_day.day,
                                            hour=20).strftime('%Y%m%d%H%M%S')
            if x.end_time >= today_20:
                rst = today_20
            elif x.end_time >= today_8:
                rst = today_8
            else:
                rst = yestoday_20
            return rst

        d['from'] = d.apply(lambda x: __cal_end_time2(x), axis=1)
        # NOTE 强制把BASE_SU_J003的列名都改成小写
        p_dataframe_BASE_SU_J003.columns = p_dataframe_BASE_SU_J003.columns.str.lower()
        d = pd.merge(d, p_dataframe_BASE_SU_J003, on=['from'], how='left')

        def __cal_prod_turn(x):
            rst = int(x.turn)
            return rst

        d['prod_turn'] = d.apply(lambda x: __cal_prod_turn(x), axis=1)

        def __cal_prod_month(x):
            t = datetime.datetime.strptime(str(x.date), '%Y%m%d')
            return t.strftime('%Y%m')

        d['PROD_MONTH'] = d.apply(lambda x: __cal_prod_month(x), axis=1)

        def __cal_prod_date2(x):
            # NOTE 生产结束时间的前八位
            t = datetime.datetime.strptime(str(x.date), '%Y%m%d')
            return t.strftime('%Y%m%d')

        d['prod_date'] = d.apply(lambda x: __cal_prod_date2(x), axis=1)

        def __cal_REC_CREATOR(x):
            t = '----'
            return t

        d['REC_CREATOR'] = d.apply(lambda x: __cal_REC_CREATOR(x), axis=1)

        def __cal_REC_CREATE_TIME(x):
            t = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
            return t

        d['REC_CREATE_TIME'] = d.apply(lambda x: __cal_REC_CREATE_TIME(x), axis=1)

        def __cal_REC_CREATE_DATE(x):
            t = datetime.datetime.now().strftime('%Y%m%d')
            return t

        d['REC_CREATE_DATE'] = d.apply(lambda x: __cal_REC_CREATE_DATE(x), axis=1)

        def __cal_REC_REVISOR(x):
            t = '----'
            return t

        d['REC_REVISOR'] = d.apply(lambda x: __cal_REC_REVISOR(x), axis=1)

        def __cal_REC_REVISOR_TIME(x):
            t = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
            return t

        d['REC_REVISOR_TIME'] = d.apply(lambda x: __cal_REC_REVISOR_TIME(x), axis=1)

        d.columns = d.columns.str.upper()
        d.rename(
            columns={'SUM_X': 'QUALIFIED_QTY', 'SUM_Y': 'TOTAL_QTY', 'ST_NO': 'TAPPING_MARK'},
            inplace=True)

        self.logger.info('df_properties-')
        df_properties = d.columns.values
        self.logger.info(",".join(df_properties))

        #
        table_properties = ['ITEM_ID', 'ITEM_CHN_NAME', 'END_TIME', 'FACTORY_DESC', 'UNIT_CODE', 'PROD_COILNO',
                            'ENTRY_COILNO', 'SECTION_ID', 'SECTION_NAME', 'EXIT_WT', 'TAPPING_MARK', 'QUALIFIED_RATE',
                            'QUALIFIED_QTY', 'TOTAL_QTY', 'QUALIFIED_JUDGE', 'PROD_DATE', 'PROD_MONTH', 'PROD_TURN',
                            'REC_CREATOR', 'REC_CREATE_TIME', 'REC_CREATE_DATE', 'REC_REVISOR', 'REC_REVISOR_TIME']

        self.logger.info('diff-1')
        diff1 = list(set(df_properties).difference(set(table_properties)))  # df_properties中有而table_properties中没有的
        self.logger.info(",".join(diff1))

        self.logger.info('diff-2')
        diff2 = list(set(table_properties).difference(set(df_properties)))  # table_properties中有而df_properties中没有的
        self.logger.info(",".join(diff2))

        # 删除多余列
        for p in diff1:
            d.drop(p, axis=1, inplace=True)

        RetryableSave(p_db_conn=p_conn_mpp, p_dataframe=d, p_max_times=5).redo()
        self.logger.info('bbb')

        pass

    def __do_query_sts_db_by(self,
                             p_conn_sts=None,
                             p_tbl_eng_name=None,
                             p_dataframe=None,
                             p_feature_info=None,
                             p_max_end_time_1=None,
                             p_max_end_time_2=None) -> (tuple):
        """
        Query the STS database for one feature's measured values inside the
        time window [p_max_end_time_1, p_max_end_time_2].

        :param p_conn_sts: open STS database connection
        :param p_tbl_eng_name: unused here (kept for signature compatibility)
        :param p_dataframe: unused here (kept for signature compatibility)
        :param p_feature_info: FeatureInfo row describing the feature and its table
        :param p_max_end_time_1: window start, 'YYYYmmddHHMMSS' string
        :param p_max_end_time_2: window end, 'YYYYmmddHHMMSS' string
        :return: (group, df) where group maps entry coil number -> list of
                 STSEntry (one entry per coil for low-frequency data, several
                 for high-frequency data) and df is the enriched result
                 DataFrame, or ({}, None) when the query failed after retries.
        """
        # FIXME the window bounds are compared lexically as strings, not as
        #  timestamps; this only works while the column stays fixed-width
        #  'YYYYmmddHHMMSS'.
        if p_feature_info.prod_coilno_field is None:
            # No exit-coil column configured: select only the entry coil number
            # and the measured value scaled by qty_grade.
            sql = " select %s as entry_coilno, %s*%s as field_value from %s.%s where 1=1 and %s>='%s' and %s<='%s' " % (
                p_feature_info.entry_coilno_field,
                p_feature_info.field_eng_name,
                p_feature_info.qty_grade,
                p_feature_info.tbl_schema,
                p_feature_info.tbl_eng_name,
                p_feature_info.time_field,
                p_max_end_time_1,
                p_feature_info.time_field,
                p_max_end_time_2)
        else:
            # Exit-coil column configured: select it as well.
            # BUG FIX: qty_grade previously multiplied the exit coil number
            # ("%s*%s as prod_coilno") while field_value was left unscaled;
            # the scale factor now applies to field_value, consistent with the
            # branch above.
            sql = " select %s as entry_coilno, %s as prod_coilno, %s*%s as field_value from %s.%s where 1=1 and %s>='%s' and %s<='%s' " % (
                p_feature_info.entry_coilno_field,
                p_feature_info.prod_coilno_field,
                p_feature_info.field_eng_name,
                p_feature_info.qty_grade,
                p_feature_info.tbl_schema,
                p_feature_info.tbl_eng_name,
                p_feature_info.time_field,
                p_max_end_time_1,
                p_feature_info.time_field,
                p_max_end_time_2)

        self.logger.info('df_sts sql=%s' % sql)

        # Memoized per SQL text: identical queries are answered from the cache.
        if self.query_sts_cache.get(sql) is not None:
            return self.query_sts_cache.get(sql)

        df = RetryableQuery(p_db_conn=p_conn_sts, p_sql=sql, p_max_times=5).redo()

        group = dict()
        if df is not None:
            # BUG FIX: the rows_len log used to run before this None check and
            # crashed when every retry attempt failed.
            self.logger.info('df_sts start dataframe.rows_len=%d' % (df.shape[0]))

            # Enrich with the static-table metadata so downstream merges and
            # the final DB write have these columns available: FACTORY_DESC,
            # UNIT_CODE, ITEM_ID, ITEM_CHN_NAME, SECTION_ID, SECTION_NAME,
            # FEATURE_CODE, STATISTICS_TYPE.
            df["factory_desc"] = p_feature_info.factory_desc
            df["unit_code"] = p_feature_info.unit_code
            df["item_id"] = p_feature_info.item_id
            df["item_chn_name"] = p_feature_info.item_chn_name
            df["section_id"] = p_feature_info.section_id
            df["section_name"] = p_feature_info.section_name
            df["feature_code"] = p_feature_info.feature_code
            df["statistics_type"] = p_feature_info.statistics_type

            # Group rows by entry coil number: low-frequency data yields one
            # record per coil, high-frequency data yields many.
            for index, row in df.iterrows():
                entry = STSEntry(p_statistics_type=p_feature_info.statistics_type,
                                 p_field_eng_name=p_feature_info.field_eng_name,
                                 p_dic=row)
                if group.get(entry.entry_coilno) is None:
                    group[entry.entry_coilno] = list()
                group[entry.entry_coilno].append(entry)

            if self.query_sts_cache.get(sql) is None:
                self.query_sts_cache[sql] = (group, df)

        return group, df

    def __query_related_info_4_merge(self,
                                     p_conn_sts=None,
                                     p_feature_info=None,
                                     maxtime3=None,
                                     maxtime4=None):
        """
        Query the per-unit STS base table for the coil-level columns used by
        the later merges:

            ENTRY_COILNO  entry coil number
            PROD_COILNO   exit coil number
            PROD_DATE     production end time
            PROD_TURN     production crew
            SHIFT         production shift
            EXIT_WT       exit coil weight

        :param p_conn_sts: open STS database connection
        :param p_feature_info: FeatureInfo row; unit_code selects the table and
                               its column aliases
        :param maxtime3: accepted but unused -- see NOTE below
        :param maxtime4: accepted but unused -- see NOTE below
        :return: (my_list, df); my_list is currently always empty (the
                 ExitPdoDataEntry materialization is disabled), df is the query
                 result with a 'unit_code' column appended.
        :raises ValueError: when unit_code maps to no known base table.

        NOTE(review): unlike the commented reference SQL at the bottom of this
        file, no time filter on maxtime3/maxtime4 is applied here -- confirm
        that the unfiltered read is intentional.
        """
        # e.g. BGTASOC608.C608_0000
        table_name = 'BGTASO%s.%s_0000' % (p_feature_info.unit_code, p_feature_info.unit_code)
        self.logger.info('table_name=%s' % table_name)

        sql = None
        if p_feature_info.unit_code in ['C608', 'C708']:
            # C608, C708
            sql = "select " \
                  "entry_coil_no as entry_coilno, " \
                  "delivery_coil_no as prod_coilno, " \
                  "process_end_time as prod_date, " \
                  "crew as prod_turn," \
                  "shift as shift, " \
                  "deliver_weight_act as exit_wt " \
                  "from %s " \
                  % (table_name)
        if p_feature_info.unit_code == 'C502':
            # 502
            sql = "select " \
                  "entcoil as entry_coilno, " \
                  "outcoil as prod_coilno, " \
                  "end_time as prod_date, " \
                  "turn as prod_turn," \
                  "shift as shift, " \
                  "weight as exit_wt " \
                  "from %s " \
                  % (table_name)
        if p_feature_info.unit_code == 'C512':
            # 512
            sql = "select " \
                  "entcoil as entry_coilno, " \
                  "outcoil as prod_coilno, " \
                  "endtime as prod_date, " \
                  "crew as prod_turn," \
                  "shift as shift, " \
                  "out_weight as exit_wt " \
                  "from %s " \
                  % (table_name)
        if sql is None:
            # BUG FIX: an unsupported unit_code previously crashed below with
            # NameError on 'sql'; fail fast with a clear message instead.
            raise ValueError('unsupported unit_code: %s' % p_feature_info.unit_code)

        self.logger.info('df_sts_base sql=%s' % sql)

        # Memoized per SQL text.
        if self.related_info_cache.get(sql) is not None:
            return self.related_info_cache.get(sql)

        # NOTE(review): RetryableQuery returns None after exhausting retries;
        # the column assignment below would then raise -- confirm desired.
        df = RetryableQuery(p_db_conn=p_conn_sts, p_sql=sql, p_max_times=5).redo()
        df['unit_code'] = p_feature_info.unit_code

        self.logger.info('df_sts_base start dataframe.rows_len=%d' % (df.shape[0]))

        # Kept empty on purpose: the ExitPdoDataEntry materialization is disabled.
        my_list = list()

        self.related_info_cache[sql] = (my_list, df)

        return my_list, df

    def __do_query_low_upper_limit_from_mes_by_4_merge(self,
                                                       p_conn_mes=None,
                                                       p_feature_info=None,
                                                       maxtime3=None,
                                                       maxtime4=None):
        """
        Fetch the quality limits for every feature of one unit from MES (DB2),
        for the later merge that decides whether a coil's measured value is in
        spec: cp_item_n_001 is the lower limit, cp_item_n_003 the upper limit
        (a sentinel upper limit of -9999 is rewritten to 9999 in SQL).

        The MES rows are keyed by feature code (key_prc_char_code) and ENTRY
        coil number: mat_no holds the entry coil, not the exit coil. st_no
        (tapping mark) is selected as well because it is written to the DB
        later.

        :param p_conn_mes: open MES (DB2) connection
        :param p_feature_info: FeatureInfo row; unit_code selects the limit table
        :param maxtime3: accepted but unused here
        :param maxtime4: accepted but unused here
        :return: (consider, ok, r, df) -- consider/ok are legacy placeholders
                 (False / -1), r is a mocked Range with a meaningless tapping
                 mark, df is the query result. Memoized per SQL text in
                 self.low_upper_limit_cache.
        """
        consider = False
        ok = -1
        # Mocked Range kept only for the legacy return signature.
        r = Range(p_dic={'st_no': '-9', 'cp_item_n_001': '-9', 'cp_item_n_003': '-9'}, p_mock=True)

        # Per-unit limit tables in MES; unknown units yield an empty table name.
        limit_tables = {
            'C608': 'CPCR5A.TCPCR5A03',
            'C708': 'CPCR5A.TCPCR5A03',
            'C502': 'CPCR5A.TCPCR5A01',
            'C512': 'CPCR5A.TCPCR5A02',
        }
        table_name = limit_tables.get(p_feature_info.unit_code, '')

        sql = " select " \
              " st_no, " \
              " cp_item_n_001," \
              " (CASE when cp_item_n_003=-9999 then 9999 else cp_item_n_003 end) as cp_item_n_003, " \
              " unit_code, " \
              " key_prc_char_code as feature_code, " \
              " mat_no as entry_coilno " \
              " from " \
              " %s " \
              " where " \
              " 1=1 " \
              " and unit_code='%s' " \
              % (table_name,
                 p_feature_info.unit_code)

        self.logger.info('mes_low_upper_limit sql=%s' % sql)

        cached = self.low_upper_limit_cache.get(sql)
        if cached is not None:
            return cached

        df = RetryableQuery(p_db_conn=p_conn_mes, p_sql=sql, p_max_times=5).redo()

        self.logger.info('df_mes start dataframe.rows_len=%d' % (df.shape[0]))

        self.logger.info('consider={}, ok={}'.format(consider, ok))

        self.low_upper_limit_cache[sql] = (consider, ok, r, df)

        return consider, ok, r, df

    def __do_query_max_end_time_from_mpp_by(self):
        #
        now = datetime.datetime.now()
        oneday = datetime.timedelta(days=1)

        prev_day = now - oneday
        # s = datetime.datetime(year=2020, month=12, day=31, hour=0, minute=0)
        # e = datetime.datetime(year=2021, month=1, day=1, hour=0, minute=0)
        s = datetime.datetime(year=prev_day.year, month=prev_day.month, day=prev_day.day, hour=0, minute=0, second=0)
        e = datetime.datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0)
        max_end_time_1 = s.strftime('%Y%m%d%H%M%S')
        max_end_time_2 = e.strftime('%Y%m%d%H%M%S')

        # prev-day, next-day
        maxtime3 = (s - oneday).strftime('%Y%m%d%H%M%S')
        maxtime4 = (e + oneday).strftime('%Y%m%d%H%M%S')

        self.logger.info('final max_end_time=%s' % max_end_time_1)

        return max_end_time_1, max_end_time_2, maxtime3, maxtime4

#
#     step1
#     MPP查静态表
# sql = 'select * from BGTAMAL1.T_DWD_FACT_ZZSC_LZZB_J001 WHERE 1=1 order by item_id'

# step2
# 建立空的dataframe1
# 对step1取的数据进行for循环  一条一条去STS查询
# sql = " select %s as entry_coilno, %s as field_value from %s.%s where 1=1 and %s>='%s' and %s<='%s' " % (
#     p_feature_info.entry_coilno_field,
#     p_feature_info.field_eng_name,
#     p_feature_info.tbl_schema,
#     p_feature_info.tbl_eng_name,
#     p_feature_info.time_field,
#     p_max_end_time_1,
#     p_feature_info.time_field,
#     p_max_end_time_2)
# 得到65个dataframe1，
# dataframe1创立时要补全从静态表读到的信息FACTORY_DESC,UNIT_CODE,ITEM_ID,ITEM_CHN_NAME,SECTION_ID,SECTION_NAME,FEATURE_CODE,STATISTICS_TYPE
#

# step3
# 建立空的dataframe2
# 分别去STS里查询静态表
# table_name = 'BGTASO%s.%s_0000' % (p_feature_info.unit_code, p_feature_info.unit_code)
# self.logger.info('table_name=%s' % table_name)
# if p_feature_info.unit_code in ['C608', 'C708']:
#     # C608, C708
#     sql = "select " \
#           "entry_coil_no as entry_coilno, " \
#           "delivery_coil_no as prod_coilno, " \
#           "process_end_time as prod_date, " \
#           "crew as prod_turn," \
#           "shift as shift, " \
#           "deliver_weight_act as exit_wt " \
#           "from %s " \
#           "where process_end_time>'%s' " \
#           "and process_end_time<='%s' " \
#           % (table_name, maxtime3, maxtime4)
# if p_feature_info.unit_code == 'C502':
#     # 502
#     sql = "select " \
#           "entcoil as entry_coilno, " \
#           "outcoil as prod_coilno, " \
#           "end_time as prod_date, " \
#           "turn as prod_turn," \
#           "shift as shift, " \
#           "weight as exit_wt " \
#           "from %s " \
#           "where end_time>'%s' " \
#           "and end_time<='%s' " \
#           % (table_name, maxtime3, maxtime4)
# if p_feature_info.unit_code == 'C512':
#     # 512
#     sql = "select " \
#           "entcoil as entry_coilno, " \
#           "outcoil as prod_coilno, " \
#           "endtime as prod_date, " \
#           "crew as prod_turn," \
#           "shift as shift, " \
#           "out_weight as exit_wt " \
#           "from %s " \
#           "where endtime>'%s' " \
#           "and endtime<='%s' " \
#           % (table_name, maxtime3, maxtime4)
#
# self.logger.info('df_sts_base sql=%s' % sql)
# 总共有65个dataframe2
#
# step4
# 将dataframe1和dataframe2  合并在一起 得到dataframe3也是65个 on  entry_coilno，unit_code
#
#
#
# step5
# 建立空的dataframe4
#
# 查询完也有65个dataframe4
# 去mes  取上下限
#
# if p_feature_info.unit_code in ['C608', 'C708']:
#     # 要改成 但unit_code=C608,C708时是去这个表查CPCR5A.TCPCR5A03
#     table_name = 'CPCR5A.TCPCR5A03'
# elif p_feature_info.unit_code == 'C502':
#     # 当unit_code=C502时是去这个表查CPCR5A.TCPCR5A01
#     table_name = 'CPCR5A.TCPCR5A01'
# elif p_feature_info.unit_code == 'C512':
#     # 当unit_code=C512时是去这个表查CPCR5A.TCPCR5A02
#     table_name = 'CPCR5A.TCPCR5A02'
# else:
#     table_name = ''
#
#     # st_no(出钢记号)这个字段也先取出来,最后写入到数据库用
#     # key_prc_char_code  mat_no分别是那个表里的特性代码. 入口卷号
#     # 用出口卷号去查那个特征的上限下限
#     # 那个mat_no记录的是入口卷号 不是出口卷号。。。
# sql = " select " \
#       " st_no, " \
#       " cp_item_n_001," \
#       " (CASE when cp_item_n_003=-9999 then 9999 else cp_item_n_003 end) as cp_item_n_003, " \
#       " unit_code, " \
#       " key_prc_char_code as feature_code, " \
#       " mat_no as entry_coilno " \
#       " from " \
#       " %s " \
#       " where " \
#       " 1=1 " \
#       " and unit_code='%s' " \
#       " and rec_create_time>='%s' " \
#       " and rec_create_time<='%s'" \
#       % (table_name,
#          p_feature_info.unit_code,
#          maxtime3,
#          maxtime4)
# step6
#
# 将dataframe3和dataframe4  合并得到dataframe5   on feature_code ，entry_coilno ，unit_code
#
# step7
# 得到65个dataframe5后
# 对每一个都进行一次groupby求和，按照prod_coilno，item_id，对刚刚计算的OK那列进行求和，得到每个卷这个特征的ok总点数QUALIFIED_QTY，和总点数TOTAL_QTY
# 然后得到每个出口卷号一条数据  相当于将高频数据合并了
# 然后增加一列QUALIFIED_RATE=QUALIFIED_QTY/TOTAL_QTY*100
# 然后增加一列QUALIFIED_JUDGE，之前那个多少分是1多少分是2多少分是3
