#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import time, sys, datetime, logging
import pandas as pd

import db_utils as util

from app.models.T_DWD_FACT_ZZSC_LZZB_J001 import FeatureInfo, Summary, STSEntry, ExitPdoDataEntry, FactDetail, Range
from app.LoggableObject import LoggableObject


class ColdRollingJob(LoggableObject):
    """ETL job computing cold-rolling CP (key process characteristic)
    conformity per produced coil.

    One :meth:`execute` run touches three databases:

    1. MPP (DB2)  -- ``BGTAMAL1.T_DWD_FACT_ZZSC_LZZB_J001``: static table
       listing every feature to evaluate (unit_code, item_id, section_name,
       feature_code, statistics_type, source table/field names, ...).
    2. STS        -- raw measurement values for each feature inside the
       active time window, grouped by entry coil number.
    3. MES (DB2)  -- ``CPCR5A.TCPCR5A01/02/03``: lower/upper limit per
       feature, used to judge each value as OK (1) / not OK (0).

    Results are appended to ``BGTAMAL1.T_DWD_FACT_ZZSC_LZZB_0001_TMP``.

    Query results are cached per run (keyed by the literal SQL text) and
    the caches are reset at the end of every :meth:`execute` call.
    """

    # Configuration object holding all DB connection parameters.
    config = None

    # Per-run SQL-result caches, keyed by the exact SQL string.
    max_end_time_cache = dict()
    query_sts_cache = dict()
    related_info_cache = dict()
    low_upper_limit_cache = dict()

    def __init__(self, p_config=None):
        super(ColdRollingJob, self).__init__()
        self.config = p_config

        # Shadow the class-level dicts with per-instance ones so two job
        # instances never share cached query results.
        self.max_end_time_cache = dict()
        self.query_sts_cache = dict()
        self.related_info_cache = dict()
        self.low_upper_limit_cache = dict()

    def execute(self, max_end_time_1=None, max_end_time_2=None):
        """Run the whole job once.

        :param max_end_time_1: optional window start ('%Y%m%d%H%M%S' string);
            when None the window is derived from the MPP result table.
        :param max_end_time_2: optional window end, same format.
        """
        self.logger.info('ColdRollingJob.execute')
        start = datetime.datetime.now()

        try:
            self.__do_execute(max_end_time_1=max_end_time_1, max_end_time_2=max_end_time_2)
        finally:
            # BUGFIX: use total_seconds() -- .seconds truncates at one day.
            elapsed = (datetime.datetime.now() - start).total_seconds()
            self.logger.info("Time Used 4 All ----->>>> %f seconds" % (elapsed))

            # NOTE clear caches so the next job run starts fresh.
            self.max_end_time_cache = dict()
            self.query_sts_cache = dict()
            self.related_info_cache = dict()
            self.low_upper_limit_cache = dict()
            self.low_upper_limit_cache['t'] = 0

    def __do_execute(self, max_end_time_1=None, max_end_time_2=None):
        """Open the three DB connections, process every feature row from the
        MPP static table, and always close the connections afterwards.
        """
        db_conn_mpp = util.getConnectionDb2(self.config.DB_HOST_MPP_DB2_BGBDPROD,
                                            self.config.DB_PORT_MPP_DB2_BGBDPROD,
                                            self.config.DB_DBNAME_MPP_DB2_BGBDPROD,
                                            self.config.DB_USER_MPP_DB2_BGBDPROD,
                                            self.config.DB_PASSWORD_MPP_DB2_BGBDPROD)
        self.logger.info('connect db_conn_mpp success?????????????????')

        db_conn_sts = util.getConnectionSTS(self.config.DB_HOST_STS,
                                            self.config.DB_PORT_STS,
                                            self.config.DB_DBNAME_STS,
                                            self.config.DB_USER_STS,
                                            self.config.DB_PASSWORD_STS)
        self.logger.info('connect db_conn_sts success?????????????????')

        db_conn_mes = util.getConnectionDb2(self.config.DB_HOST_MES_DB2_BG5AM0,
                                            self.config.DB_PORT_MES_DB2_BG5AM0,
                                            self.config.DB_DBNAME_MES_DB2_BG5AM0,
                                            self.config.DB_USER_MES_DB2_BG5AM0,
                                            self.config.DB_PASSWORD_MES_DB2_BG5AM0)
        self.logger.info('connect db_conn_mes success?????????????????')

        try:
            # NOTE step-1: read every feature definition from the MPP static
            # table.  A machine may own an arbitrary subset of features
            # (e.g. A has 1-6, B has 2-8, ...), hence no statistics_type
            # filter here.  Extracted fields: unit_code, item_id,
            # section_name, feature_code, statistics_type, tbl_schema,
            # tbl_eng_name, field_eng_name, coilno_field, time.
            sql = 'select * from BGTAMAL1.T_DWD_FACT_ZZSC_LZZB_J001 WHERE 1=1 order by item_id'
            self.logger.info('df_mpp_db2 sql=%s' % sql)
            df = None
            try:
                df = util.query(conn=db_conn_mpp, sql=sql)
            except Exception as e:
                # BUGFIX: the old code logged and then dereferenced the
                # unbound ``df`` -> NameError; now we bail out cleanly.
                self.logger.error(str(e))
            if df is None:
                return

            self.logger.info('------------------------------start dataframe.rows_len=%d' % (df.shape[0]))
            for index, row in df.iterrows():
                self.logger.info('\n\n\n################################################start index=%d' % index)
                feature_info = FeatureInfo(p_dic=row)
                self.__do_process_data(p_conn_mpp=db_conn_mpp,
                                       p_conn_sts=db_conn_sts,
                                       p_conn_mes=db_conn_mes,
                                       p_feature_info=feature_info,
                                       max_end_time_1=max_end_time_1,
                                       max_end_time_2=max_end_time_2)
                self.logger.info('################################################end index=%d\n\n\n' % index)
            self.logger.info('------------------------------end')
        finally:
            # BUGFIX: connections used to leak when processing raised.
            util.closeConnection(db_conn_mpp)
            util.closeConnection(db_conn_sts)
            util.closeConnection(db_conn_mes)

    def __do_process_data(self,
                          p_conn_mpp=None,
                          p_conn_sts=None,
                          p_conn_mes=None,
                          p_feature_info=None,
                          max_end_time_1=None,
                          max_end_time_2=None):
        """Evaluate one feature definition end to end.

        ``statistics_type`` is 1 for low-frequency data (one value per coil)
        and 2 for high-frequency data (many values per coil).

        :return: 0 (result currently unused by the caller)
        """
        # NOTE step-1.1: resolve the time window.  The helper also yields the
        # widened [maxtime3, maxtime4] window required by the MES limit
        # query, so it must run even when the caller supplied the window
        # (BUGFIX: the old code left maxtime3/maxtime4 undefined in that case).
        start = datetime.datetime.now()
        q_end_1, q_end_2, maxtime3, maxtime4 = self.__do_query_max_end_time_from_mpp_by(
            p_conn_mpp=p_conn_mpp, p_feature_info=p_feature_info)
        if max_end_time_1 is None:
            max_end_time_1, max_end_time_2 = q_end_1, q_end_2
        elapsed = (datetime.datetime.now() - start).total_seconds()
        self.logger.info("Time_Used_4_ __do_query_max_end_time_from_mpp_by----->>>> %f seconds" % (elapsed))

        # NOTE step-2: all coils produced by this machine inside the window,
        # grouped by entry coil number.
        start = datetime.datetime.now()
        sts_dict, dataframe_sts = self.__do_query_sts_db_by(p_conn_sts=p_conn_sts,
                                                            p_feature_info=p_feature_info,
                                                            p_max_end_time_1=max_end_time_1,
                                                            p_max_end_time_2=max_end_time_2)
        elapsed = (datetime.datetime.now() - start).total_seconds()
        self.logger.info("Time_Used_4_ __do_query_sts_db_by----->>>> %f seconds" % (elapsed))
        if dataframe_sts is None:
            self.logger.info('no data in df_sts??????')
            return 0

        # Entry-coil related info (exit coil no, production date/turn/shift,
        # exit weight), joined onto the measurements by entry coil number.
        sts_base_list, dataframe_related = self.__query_related_info_4_merge(p_conn_sts=p_conn_sts,
                                                                             p_feature_info=p_feature_info)
        if dataframe_related is None:
            return 0
        dataframe_3 = pd.merge(dataframe_sts, dataframe_related, on='coilno', how='left')

        elapsed = (datetime.datetime.now() - start).total_seconds()
        self.logger.info(dataframe_3)
        self.logger.info("Time_Used_4_merge_1_2----->>>> %f seconds" % (elapsed))

        prod_group = dict()
        # One group per entry coil number: low-frequency features have a
        # single value per group, high-frequency features have many.
        for key, sts_list in sts_dict.items():
            self.logger.info('sts_list start sts_list.len=%d' % (len(sts_list)))
            if not sts_list:
                continue
            # BUGFIX: the old code referenced an undefined ``sts_entry``
            # here; use the first entry of the group as representative.
            sts_entry = sts_list[0]

            consider, ok, r, dataframe_limit = self.__do_query_low_upper_limit_from_mes_by_4_merge(
                p_conn_mes=p_conn_mes,
                p_feature_info=p_feature_info,
                p_sts_entry=sts_entry,
                maxtime3=maxtime3,
                maxtime4=maxtime4)
            if dataframe_limit is None:
                continue

            # Join the limits onto the measurement frame; the MES columns
            # were aliased so the join keys line up with dataframe_3.
            dataframe_5 = pd.merge(dataframe_3, dataframe_limit,
                                   on=['unit_code', 'feature_code', 'coilno'],
                                   how='left')
            self.logger.info(dataframe_5)
            self.logger.info('------------------------------------------------------------')
            # NOTE work in progress: the per-exit-coil judging that fills
            # ``prod_group`` is not wired up yet (the old per-entry
            # implementation was unreachable dead code and was removed).
            continue

        # Persist one summary record per exit coil number.  Grouping by exit
        # coil is deliberate: limits are looked up per exit coil and the
        # result table holds one row per exit coil.  ``prod_group`` stays
        # empty until the judging path above is enabled.
        start00 = datetime.datetime.now()
        for prod_coilno, v in prod_group.items():
            self.logger.info('-------->>prod_coilno=%s' % prod_coilno)
            num_ok = 0
            r = None
            consider = False
            sts_entry = None
            exit_pdo_data = None
            for my in v:
                sts_entry = my['sts_entry']
                exit_pdo_data = my['exit_pdo_entry']
                consider = my['consider']
                ok = my['ok']
                r = my['r']
                num_ok += ok
                self.logger.info('-------->>ok=%d' % ok)

            summary = Summary(p_cp_num_total=len(v), p_num_ok=num_ok)
            if consider:
                start = datetime.datetime.now()
                self.__save2db(p_conn_mpp=p_conn_mpp,
                               p_conn_sts=p_conn_sts,
                               p_feature_info=p_feature_info,
                               p_sts_entry=sts_entry,
                               p_summary=summary,
                               p_range=r,
                               exit_pdo_data=exit_pdo_data)
                elapsed = (datetime.datetime.now() - start).total_seconds()
                self.logger.info("Time_Used_4_ __save2db----->>>> %f seconds" % (elapsed))

        elapsed = (datetime.datetime.now() - start00).total_seconds()
        self.logger.info("Time_Used_4_ __save2db all----->>>> %f seconds" % (elapsed))

        return 0

    def __do_query_max_end_time_from_mpp_by(self,
                                            p_conn_mpp=None,
                                            p_feature_info=None):
        """Resolve the processing time window for one item_id.

        Looks up MAX(end_time) for the item in the MPP result table
        ``BGTAMAL1.T_DWD_FACT_ZZSC_LZZB_0001_TMP``; falls back to
        "first day of the previous month .. now" when nothing is found.

        :return: ``(max_end_time_1, max_end_time_2, maxtime3, maxtime4)``
            -- all '%Y%m%d%H%M%S' strings; [1, 2] is the measurement
            window, [3, 4] the widened window for the MES
            rec_create_time filter.
        """
        sql = " select " \
              " MAX(end_time) AS max_end_time " \
              " from " \
              " BGTAMAL1.T_DWD_FACT_ZZSC_LZZB_0001_TMP " \
              " where " \
              " item_id='%s'" % (p_feature_info.item_id)
        self.logger.info('df_mpp_max_end_time sql=%s' % sql)

        cached = self.max_end_time_cache.get(sql)
        if cached is not None:
            return cached

        df = None
        try:
            df = util.query(conn=p_conn_mpp, sql=sql)
        except Exception as e:
            # BUGFIX: ``df`` used to stay unbound after a failed query.
            self.logger.error(str(e))

        max_end_time_1 = None
        max_end_time_2 = None
        if df is not None:
            self.logger.info('df_mpp_max_end_time start dataframe.rows_len=%d' % (df.shape[0]))
            if df.shape[0] > 0:
                max_end_time_1 = df.iloc[0]['max_end_time']
                self.logger.info('if max_end_time=%s' % max_end_time_1)

        now = datetime.datetime.now()
        if max_end_time_1 is None:
            # Fallback: first day of the previous month .. now.
            # BUGFIX: computed via timedelta so January no longer crashes
            # (the old ``now.month - 1`` produced month 0 -> ValueError).
            first_of_month = datetime.datetime(now.year, now.month, 1)
            first_of_prev = (first_of_month - datetime.timedelta(days=1)).replace(
                day=1, hour=0, minute=0, second=0, microsecond=0)
            max_end_time_1 = first_of_prev.strftime('%Y%m%d%H%M%S')
            max_end_time_2 = now.strftime('%Y%m%d%H%M%S')

        # FIXME development override: window hard-coded to Dec 23 .. Dec 24
        # of the current year -- remove before production.
        s = datetime.datetime(year=now.year, month=12, day=23, hour=0, minute=0)
        e = datetime.datetime(year=now.year, month=12, day=24, hour=0, minute=0)
        max_end_time_1 = s.strftime('%Y%m%d%H%M%S')
        max_end_time_2 = e.strftime('%Y%m%d%H%M%S')

        # Widened window (prev-day .. next-day) for the MES query.
        # BUGFIX: a stray trailing comma used to turn maxtime3 into a
        # one-element tuple, producing broken SQL downstream.
        maxtime3 = datetime.datetime(year=now.year, month=12, day=22, hour=0, minute=0).strftime('%Y%m%d%H%M%S')
        maxtime4 = datetime.datetime(year=now.year, month=12, day=25, hour=0, minute=0).strftime('%Y%m%d%H%M%S')

        self.logger.info('final max_end_time=%s' % max_end_time_1)

        self.max_end_time_cache[sql] = (max_end_time_1, max_end_time_2, maxtime3, maxtime4)

        return max_end_time_1, max_end_time_2, maxtime3, maxtime4

    def __do_query_sts_db_by(self,
                             p_conn_sts=None,
                             p_feature_info=None,
                             p_max_end_time_1=None,
                             p_max_end_time_2=None):
        """Fetch every measurement of one feature inside the time window.

        Builds ``select <coilno_field> as coilno, <field_eng_name> as
        field_value from <schema>.<table> where <time_field> between the
        two bounds`` and groups the rows by entry coil number.

        :return: ``(group, df)`` where ``group`` maps coilno -> [STSEntry]
            and ``df`` is the raw DataFrame (None on query failure).
        """
        # FIXME the bounds are compared as strings, not timestamps -- this
        # only works while both sides share the %Y%m%d%H%M%S format.
        sql = " select %s as coilno, %s as field_value from %s.%s where 1=1 and %s>='%s' and %s<='%s' " % (
            p_feature_info.coilno_field,
            p_feature_info.field_eng_name,
            p_feature_info.tbl_schema,
            p_feature_info.tbl_eng_name,
            p_feature_info.time_field,
            p_max_end_time_1,
            p_feature_info.time_field,
            p_max_end_time_2)
        self.logger.info('df_sts sql=%s' % sql)

        cached = self.query_sts_cache.get(sql)
        if cached is not None:
            return cached

        df = None
        try:
            df = util.query(conn=p_conn_sts, sql=sql)
        except Exception as e:
            self.logger.info(str(e))

        group = dict()
        if df is not None:
            # BUGFIX: row count is only logged when the query succeeded
            # (the old code dereferenced a None df here).
            self.logger.info('df_sts start dataframe.rows_len=%d' % (df.shape[0]))
            # Group rows by coil number: low-frequency data yields one
            # entry per group, high-frequency data yields many.
            for index, row in df.iterrows():
                entry = STSEntry(p_statistics_type=p_feature_info.statistics_type,
                                 p_field_eng_name=p_feature_info.field_eng_name,
                                 p_dic=row)
                group.setdefault(entry.coilno, list()).append(entry)
            self.query_sts_cache[sql] = (group, df)

        return group, df

    def __query_related_info(self,
                             p_conn_sts=None,
                             p_feature_info=None,
                             p_sts_entry=None):
        """Fetch exit-coil production data for one entry coil.

        Column aliases (per unit): entry_coilno (entry coil no),
        prod_coilno (exit coil no), prod_date, prod_turn (crew),
        shift, exit_wt (exit coil weight).

        :return: list of ExitPdoDataEntry (possibly empty).
        """
        # e.g. BGTASOC608.C608_0000
        table_name = 'BGTASO%s.%s_0000' % (p_feature_info.unit_code, p_feature_info.unit_code)
        self.logger.info('table_name=%s' % table_name)
        sql = None
        if p_feature_info.unit_code in ['C608', 'C708']:
            sql = "select " \
                  "entry_coil_no as entry_coilno, " \
                  "delivery_coil_no as prod_coilno, " \
                  "process_end_time as prod_date, " \
                  "crew as prod_turn," \
                  "shift as shift, " \
                  "deliver_weight_act as exit_wt " \
                  "from %s " \
                  "where entry_coil_no='%s' " % (table_name, p_sts_entry.coilno)
        if p_feature_info.unit_code == 'C502':
            sql = "select " \
                  "entcoil as entry_coilno, " \
                  "outcoil as prod_coilno, " \
                  "end_time as prod_date, " \
                  "turn as prod_turn," \
                  "shift as shift, " \
                  "weight as exit_wt " \
                  "from %s " \
                  "where entcoil='%s' " % (table_name, p_sts_entry.coilno)
        if p_feature_info.unit_code == 'C512':
            sql = "select " \
                  "entcoil as entry_coilno, " \
                  "outcoil as prod_coilno, " \
                  "endtime as prod_date, " \
                  "crew as prod_turn," \
                  "shift as shift, " \
                  "out_weight as exit_wt " \
                  "from %s " \
                  "where entcoil='%s' " % (table_name, p_sts_entry.coilno)
        if sql is None:
            # BUGFIX: unknown unit codes used to raise NameError on ``sql``.
            self.logger.error('unsupported unit_code=%s' % p_feature_info.unit_code)
            return list()

        self.logger.info('df_sts_base sql=%s' % sql)

        cached = self.related_info_cache.get(sql)
        if cached is not None:
            return cached

        df = None
        try:
            df = util.query(conn=p_conn_sts, sql=sql)
        except Exception as e:
            self.logger.info(str(e))

        my_list = list()
        if df is not None:
            self.logger.info('df_sts_base start dataframe.rows_len=%d' % (df.shape[0]))
            for index, row in df.iterrows():
                my_list.append(ExitPdoDataEntry(p_dic=row, p_parent=p_sts_entry))
            self.related_info_cache[sql] = my_list

        return my_list

    def __query_related_info_4_merge(self,
                                     p_conn_sts=None,
                                     p_feature_info=None):
        """Fetch the whole exit-coil production table for one unit, aliased
        for merging with the STS measurement frame on ``coilno``.

        Column aliases: coilno (entry coil no), prod_coilno (exit coil
        no), prod_date, prod_turn (crew), shift, exit_wt.

        :return: ``(my_list, df)`` -- ``my_list`` is currently always empty
            (kept for interface compatibility); ``df`` is the raw
            DataFrame, or None on failure / unsupported unit code.
        """
        # e.g. BGTASOC608.C608_0000
        table_name = 'BGTASO%s.%s_0000' % (p_feature_info.unit_code, p_feature_info.unit_code)
        self.logger.info('table_name=%s' % table_name)
        sql = None
        if p_feature_info.unit_code in ['C608', 'C708']:
            sql = "select " \
                  "entry_coil_no as coilno, " \
                  "delivery_coil_no as prod_coilno, " \
                  "process_end_time as prod_date, " \
                  "crew as prod_turn," \
                  "shift as shift, " \
                  "deliver_weight_act as exit_wt " \
                  "from %s " \
                  "where 1=1" % (table_name)
        if p_feature_info.unit_code == 'C502':
            sql = "select " \
                  "entcoil as coilno, " \
                  "outcoil as prod_coilno, " \
                  "end_time as prod_date, " \
                  "turn as prod_turn," \
                  "shift as shift, " \
                  "weight as exit_wt " \
                  "from %s " \
                  "where 1=1 " % (table_name)
        if p_feature_info.unit_code == 'C512':
            sql = "select " \
                  "entcoil as coilno, " \
                  "outcoil as prod_coilno, " \
                  "endtime as prod_date, " \
                  "crew as prod_turn," \
                  "shift as shift, " \
                  "out_weight as exit_wt " \
                  "from %s " \
                  "where 1=1 " % (table_name)
        if sql is None:
            # BUGFIX: unknown unit codes used to raise NameError on ``sql``.
            self.logger.error('unsupported unit_code=%s' % p_feature_info.unit_code)
            return list(), None

        self.logger.info('df_sts_base sql=%s' % sql)

        cached = self.related_info_cache.get(sql)
        if cached is not None:
            return cached

        df = None
        try:
            df = util.query(conn=p_conn_sts, sql=sql)
        except Exception as e:
            self.logger.error(str(e))

        my_list = list()
        if df is not None:
            # BUGFIX: the old code logged df.shape even when df was None.
            self.logger.info('df_sts_base start dataframe.rows_len=%d' % (df.shape[0]))
            self.related_info_cache[sql] = (my_list, df)

        return my_list, df

    def __do_query_low_upper_limit_from_mes_by_4_merge(self,
                                                       p_conn_mes=None,
                                                       p_feature_info=None,
                                                       p_sts_entry=None,
                                                       maxtime3=None,
                                                       maxtime4=None):
        """Fetch the lower/upper limit rows for one unit inside the widened
        [maxtime3, maxtime4] window, and judge the representative
        measurement against the first limit row.

        ``cp_item_n_001`` is the lower limit, ``cp_item_n_003`` the upper
        limit (-9999 means "unbounded" and is mapped to 9999 in SQL).
        A value inside [lower, upper] is OK (1), otherwise not OK (0).

        :return: ``(consider, ok, r, df)`` -- ``consider`` is True when a
            limit row was found, ``ok`` is 1/0 (or -1 when not judged),
            ``r`` is the Range (a mock with st_no='-9' when nothing was
            found) and ``df`` is the raw limit frame (None on failure).
        """
        consider = False
        ok = -1
        # Mock range with a meaningless tapping mark, used when no limit row exists.
        r = Range(p_dic={'st_no': '-9', 'cp_item_n_001': '-9', 'cp_item_n_003': '-9'}, p_mock=True)

        # Per-unit limit tables.
        if p_feature_info.unit_code in ['C608', 'C708']:
            table_name = 'CPCR5A.TCPCR5A03'
        elif p_feature_info.unit_code == 'C502':
            table_name = 'CPCR5A.TCPCR5A01'
        elif p_feature_info.unit_code == 'C512':
            table_name = 'CPCR5A.TCPCR5A02'
        else:
            table_name = ''

        # st_no (tapping mark) is also selected because it is written to the
        # result table later.  key_prc_char_code / mat_no are aliased so the
        # frame merges on ['unit_code', 'feature_code', 'coilno'].
        # NOTE mat_no holds the ENTRY coil number, not the exit one.
        sql = " select " \
              " st_no, " \
              " cp_item_n_001," \
              " (CASE when cp_item_n_003=-9999 then 9999 else cp_item_n_003 end) as cp_item_n_003, " \
              " unit_code, " \
              " key_prc_char_code as feature_code, " \
              " mat_no as coilno " \
              " from " \
              " %s " \
              " where " \
              " 1=1 " \
              " and unit_code='%s' " \
              " and rec_create_time>='%s' " \
              " and rec_create_time<='%s'" \
              % (table_name,
                 p_feature_info.unit_code,
                 maxtime3,
                 maxtime4)

        self.logger.info('mes_low_upper_limit sql=%s' % sql)

        cached = self.low_upper_limit_cache.get(sql)
        if cached is not None:
            return cached

        df = None
        try:
            df = util.query(conn=p_conn_mes, sql=sql)
        except Exception as e:
            self.logger.error(str(e))

        if df is not None:
            # BUGFIX: the old code logged df.shape even when df was None.
            self.logger.info('df_mes start dataframe.rows_len=%d' % (df.shape[0]))
            self.logger.info(df)
            for index, row in df.iterrows():
                self.logger.info(index)
                r = Range(p_dic=row)
                self.logger.info('st_no=%s' % r.st_no)
                if r.cp_item_n_001 <= p_sts_entry.field_value <= r.cp_item_n_003:
                    ok = 1  # within limits -> qualified
                else:
                    ok = 0  # outside limits -> not qualified
                consider = True
                self.logger.info('field_value=%d, cp_item_n_003=%d, cp_item_n_001=%d' % (
                    p_sts_entry.field_value, r.cp_item_n_003, r.cp_item_n_001))
                self.logger.info('ok=%d' % ok)
                # Only the first limit row is judged.
                break

        self.logger.info('consider={}, ok={}'.format(consider, ok))

        self.low_upper_limit_cache[sql] = (consider, ok, r, df)
        # BUGFIX: the 't' hit counter used to KeyError on the first run
        # (it was only initialised after execute() finished).
        self.low_upper_limit_cache['t'] = self.low_upper_limit_cache.get('t', 0) + 1
        self.logger.info('tttttt---->>{}'.format(self.low_upper_limit_cache['t']))

        return consider, ok, r, df

    def __do_query_low_upper_limit_from_mes_by(self,
                                               p_conn_mes=None,
                                               p_feature_info=None,
                                               p_sts_entry=None,
                                               p_exit_pdo_entry=None):
        """Fetch the lower/upper limit for one feature of one coil and judge
        the measurement.

        Looks up the limit row by unit_code, feature_code and the ENTRY
        coil number (``mat_no`` stores the entry coil, not the exit coil).
        ``cp_item_n_001`` is the lower limit, ``cp_item_n_003`` the upper
        limit; inside [lower, upper] -> OK (1), otherwise 0.

        :return: ``(consider, ok, r)`` -- see the _4_merge variant.
        """
        consider = False
        ok = -1
        # Mock range with a meaningless tapping mark, used when no limit row exists.
        r = Range(p_dic={'st_no': '-9', 'cp_item_n_001': '-9', 'cp_item_n_003': '-9'}, p_mock=True)

        # Per-unit limit tables.
        if p_feature_info.unit_code in ['C608', 'C708']:
            table_name = 'CPCR5A.TCPCR5A03'
        elif p_feature_info.unit_code == 'C502':
            table_name = 'CPCR5A.TCPCR5A01'
        elif p_feature_info.unit_code == 'C512':
            table_name = 'CPCR5A.TCPCR5A02'
        else:
            table_name = ''

        # st_no (tapping mark) is also selected for the final DB record.
        sql = "select " \
              " st_no," \
              " cp_item_n_001," \
              " cp_item_n_003 " \
              "from %s " \
              "where 1=1 " \
              " and unit_code='%s' " \
              " and key_prc_char_code='%s' " \
              " and mat_no='%s'" \
              % (table_name,
                 p_feature_info.unit_code,
                 p_feature_info.feature_code,
                 p_exit_pdo_entry.entry_coilno)
        self.logger.info('mes_low_upper_limit sql=%s' % sql)

        cached = self.low_upper_limit_cache.get(sql)
        if cached is not None:
            return cached

        df = None
        try:
            df = util.query(conn=p_conn_mes, sql=sql)
        except Exception as e:
            # BUGFIX: ``df`` used to stay unbound after a failed query.
            self.logger.info(str(e))

        if df is not None:
            self.logger.info('df_mes start dataframe.rows_len=%d' % (df.shape[0]))
            self.logger.info(df)
            for index, row in df.iterrows():
                self.logger.info(index)
                r = Range(p_dic=row)
                self.logger.info('st_no=%s' % r.st_no)
                if r.cp_item_n_001 <= p_sts_entry.field_value <= r.cp_item_n_003:
                    ok = 1  # within limits -> qualified
                else:
                    ok = 0  # outside limits -> not qualified
                consider = True
                self.logger.info('field_value=%d, cp_item_n_003=%d, cp_item_n_001=%d' % (
                    p_sts_entry.field_value, r.cp_item_n_003, r.cp_item_n_001))
                self.logger.info('ok=%d' % ok)
                # Only the first limit row is judged.
                break

        self.logger.info('consider={}, ok={}'.format(consider, ok))

        self.low_upper_limit_cache[sql] = (consider, ok, r)
        # BUGFIX: the 't' hit counter used to KeyError on the first run.
        self.low_upper_limit_cache['t'] = self.low_upper_limit_cache.get('t', 0) + 1
        self.logger.info('tttttt---->>{}'.format(self.low_upper_limit_cache['t']))

        return consider, ok, r

    def __save2db(self,
                  p_conn_mpp=None,
                  p_conn_sts=None,
                  p_feature_info=None,
                  p_sts_entry=None,
                  p_summary=None,
                  p_range=None,
                  exit_pdo_data=None):
        """Append one detail record (one exit coil x one feature) to
        ``BGTAMAL1.T_DWD_FACT_ZZSC_LZZB_0001_TMP``.
        """
        self.logger.info('__save2db gogo')
        self.logger.info(p_summary.serialize())

        my_list = []
        detail = FactDetail()
        detail.ITEM_ID = p_feature_info.item_id
        detail.ITEM_CHN_NAME = p_feature_info.item_chn_name
        detail.END_TIME = exit_pdo_data.prod_date
        detail.UNIT_CODE = p_feature_info.unit_code
        detail.PROD_COILNO = exit_pdo_data.prod_coilno
        detail.ENTRY_COILNO = exit_pdo_data.entry_coilno
        # SECTION_ID: process-section id.
        detail.SECTION_ID = p_feature_info.section_id
        detail.SECTION_NAME = p_feature_info.section_name
        detail.FACTORY_DESC = p_feature_info.factory_desc
        detail.EXIT_WT = exit_pdo_data.exit_wt
        # Tapping mark taken from the MES limit row.
        detail.TAPPING_MARK = p_range.st_no
        # CP qualified rate (DECIMAL).
        detail.QUALIFIED_RATE = p_summary.CP_NUM_OK_RATE
        # CP qualified point count (INTEGER).
        detail.QUALIFIED_QTY = p_summary.CP_NUM_OK
        # CP total point count (INTEGER).
        detail.TOTAL_QTY = p_summary.CP_NUM_TOTAL
        # CP judgement flag (INTEGER): 1 normal (green), 2 warning (yellow),
        # 3 abnormal (red).
        detail.QUALIFIED_JUDGE = p_summary.CP_FLAG
        # Production date (VARCHAR): first 8 digits of the production end time.
        t = datetime.datetime.strptime(str(exit_pdo_data.prod_date), '%Y%m%d%H%M%S')
        detail.PROD_DATE = t.strftime('%Y%m%d')
        # Production month (VARCHAR, 6 digits).
        detail.PROD_MONTH = t.strftime('%Y%m')
        # Production crew (VARCHAR): crews A/B/C/D map to 1/2/3/4.
        # BUGFIX: use .get() so an unexpected crew code is passed through
        # instead of raising KeyError.
        detail.PROD_TURN = {'A': '1', 'B': '2', 'C': '3', 'D': '4'}.get(
            exit_pdo_data.prod_turn, exit_pdo_data.prod_turn)
        # Record audit fields (VARCHAR).
        detail.REC_CREATOR = '----'
        detail.REC_CREATE_TIME = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        detail.REC_REVISOR = '----'
        detail.REC_REVISOR_TIME = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        my_list.append(detail.serialize())

        self.logger.info('tapping mark st_no=%s' % (p_range.st_no))

        self.logger.info('__save2db-1')
        self.logger.info(my_list)
        df = pd.DataFrame(my_list)
        self.logger.info('__save2db-2')
        try:
            # NOTE to_sql is case-sensitive about table/schema names with
            # this engine -- keep them lowercase.
            df.to_sql(name='T_DWD_FACT_ZZSC_LZZB_0001_TMP'.lower(),
                      con=p_conn_mpp,
                      schema='BGTAMAL1'.lower(),
                      index=False,
                      if_exists='append')
        except Exception as e:
            self.logger.error(str(e))
        self.logger.info('__save2db-3')