#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import sys, datetime, json, logging, os
import pandas as pd
# from apscheduler.schedulers.blocking import BlockingScheduler
import math
from xpy3lib.utils.XDataFrameUtils import XDataFrameUtils
from xpy3lib.XRetryableQuery import XRetryableQuery
from xpy3lib.XRetryableSave import XRetryableSave

from xpy3lib.utils import db_utils as util
from xpy3lib.XLogger import XLogger
from sicost.config import app_config

"""

-------------------------------->> NOTE <<------------------------------------------------ 
Usage:
c512cpc.py
------------------------------------------------------------------------------------------


"""


def main():
    """Daily ETL: rebuild the C512 CPC rectify-deviation fact table.

    Flow:
      1. Connect to the STS / MPP2 / MPP3 databases; abort if any fails.
      2. Delete target-table rows for the window [D-3 20:00, D-1 20:00).
      3. Load base coil records for the window, then left-join enrichment
         data: furnace averages, SG code, material tree (exit and entry
         side), C502 tension averages, CPC8 head/tail samples, and CPC
         cylinder deviation extrema/levels.
      4. Save the merged frame into
         BGTAMSZZ00.T_DWD_FACT_ZZMM_C512_CPC_RECTIFYDEVIATION.

    Returns None. Aborts early when a connection fails or a mandatory
    source query returns no rows.
    """
    profile = 'dev'
    config = app_config[profile]

    # Pre-initialize all three handles so a partial connection failure can
    # never leave a name undefined (the previous version missed
    # db_conn_mpp3, risking a NameError on later use/cleanup).
    db_conn_sts = None
    db_conn_mpp = None
    db_conn_mpp3 = None
    try:
        db_conn_sts = util.getConnectionSTS(config.DB_HOST_STS,
                                            config.DB_PORT_STS,
                                            config.DB_DBNAME_STS,
                                            config.DB_USER_STS,
                                            config.DB_PASSWORD_STS)
        XLogger.getInstance().logger.info('connect db_conn_sts success')
        db_conn_mpp = util.getConnectionDb2(config.DB_HOST_MPP2_DB2,
                                            config.DB_PORT_MPP2_DB2,
                                            config.DB_DBNAME_MPP2_DB2,
                                            config.DB_USER_MPP2_DB2,
                                            config.DB_PASSWORD_MPP2_DB2)
        XLogger.getInstance().logger.info('connect db_conn_mpp success')
        db_conn_mpp3 = util.getConnectionDb2(config.DB_HOST_MPP3_DB2,
                                             config.DB_PORT_MPP3_DB2,
                                             config.DB_DBNAME_MPP3_DB2,
                                             config.DB_USER_MPP3_DB2,
                                             config.DB_PASSWORD_MPP3_DB2)
        XLogger.getInstance().logger.info('connect db_conn_mpp3 success')
    except Exception as e:
        XLogger.getInstance().logger.critical(str(e))
    if db_conn_sts is None or db_conn_mpp is None or db_conn_mpp3 is None:
        return

    def __query(p_db_conn, p_sql):
        """Print p_sql, run it with retries, and return the result frame
        with upper-cased column names — or None when the result is empty."""
        print(p_sql)
        df = XRetryableQuery(p_db_conn=p_db_conn, p_sql=p_sql, p_max_times=5).redo()
        if df.empty:
            return None
        df.columns = df.columns.str.upper()
        return df

    def __aggregate_cpc_devs(p_df, p_columns):
        """For each coil, compute per-column: the 100-rule extremum plus the
        head/waist/tail extrema of the coil split into thirds by sample order.
        Returns one row per coil."""
        frames = list()
        for coil_name, coil_df in p_df.groupby(by=['OUTCOIL']):
            row = {'OUTCOIL': [coil_name]}
            for col in p_columns:
                # Sort a COPY descending for the 100-rule; the original
                # (SEQ-ordered) frame is still needed for the 3-way split.
                sorted_df = coil_df.sort_values(by=[col], ascending=False, inplace=False)
                extremum, _, _ = cal_extremum_with_100_rule(p_outcoil=coil_name,
                                                            p_df=sorted_df, p_proprety=col)
                row[col] = [extremum]
                head_v, waist_v, tail_v \
                    = cal_extremun_cpc_dev_n_HWT(p_outcoil=coil_name, p_df=coil_df, p_proprety=col)
                row['{}_{}'.format(col, 'HEAD')] = [head_v]
                row['{}_{}'.format(col, 'WAIST')] = [waist_v]
                row['{}_{}'.format(col, 'TAIL')] = [tail_v]
            frames.append(pd.DataFrame(row))
        return pd.concat(frames)

    def __cal_level(p_value, p_full_scale):
        """Grade |deviation| against the cylinder full scale:
        '1' (worst, >= 90%) .. '5' (best, < 30%).
        NaN compares False against every threshold and yields ''."""
        if p_value >= p_full_scale * 0.9:
            return '1'
        if p_value >= p_full_scale * 0.7:
            return '2'
        if p_value >= p_full_scale * 0.5:
            return '3'
        if p_value >= p_full_scale * 0.3:
            return '4'
        if p_value < p_full_scale * 0.3:
            return '5'
        return ''

    start = datetime.datetime.now()

    # Processing window: D-3 20:00:00 (inclusive) to D-1 20:00:00 (exclusive).
    p_day_1 = (start - datetime.timedelta(days=1)).strftime("%Y%m%d")
    p_day_2 = (start - datetime.timedelta(days=3)).strftime("%Y%m%d")
    start_time = p_day_2 + '200000'
    end_time = p_day_1 + '200000'
    # Idempotent reload: clear the target window before re-inserting.
    sql = " DELETE FROM " \
          " BGTAMSZZ00.T_DWD_FACT_ZZMM_C512_CPC_RECTIFYDEVIATION" \
          " WHERE 1=1 " \
          " AND ENDTIME >= '%s'" \
          " AND ENDTIME < '%s'" % (start_time, end_time)
    print(sql)
    db_conn_mpp.execute(sql)

    # Base production records for the window.
    sql = " SELECT a.entcoil, " \
          " a.outcoil, " \
          " a.order_no, " \
          " a.steel_qua, " \
          " a.order_thickness, " \
          " a.order_width, " \
          " a.shift, " \
          " a.crew, " \
          " a.starttime, " \
          " a.endtime, " \
          " a.out_thick, " \
          " a.out_width, " \
          " a.out_length, " \
          " a.out_weight, " \
          " a.steel_grade, " \
          " a.annea_code " \
          " FROM " \
          " bgtasoc512.c512_0000 as a " \
          " where a.endtime >='%s'  " \
          " and a.endtime < '%s'  " % (start_time, end_time)
    df_0 = __query(db_conn_sts, sql)
    if df_0 is None:
        return

    # Quoted IN-lists for the follow-up queries.
    df_0['OUTCOIL'] = df_0['OUTCOIL'].astype(str)
    r = "'%s'" % ("','".join(df_0['OUTCOIL'].values.tolist()))
    df_0['ENTCOIL'] = df_0['ENTCOIL'].astype(str)
    r2 = "'%s'" % ("','".join(df_0['ENTCOIL'].values.tolist()))
    # Entry coil id without its 2-character prefix (key used by the C502 table).
    df_0['ENTCOIL_SHORT'] = df_0['ENTCOIL'].str[2:].astype(str)
    r3 = "'%s'" % ("','".join(df_0['ENTCOIL_SHORT'].values.tolist()))

    # Furnace-section averages (speeds, temperature differences, tensions).
    sql = " select outcoil as OUTCOIL,AVG(cen_speed) as cen_speed,AVG(stemp_jrf-ftemp_jrf) as stemp_jrf_diff,AVG(stemp_rtf-ftemp_rtf25) as stemp_rtf_diff,AVG(stemp_sf-ftemp_sf) as stemp_sf_diff,AVG(stemp_scs-ftemp_scs) as stemp_scs_diff,AVG(stemp_rcs-ftemp_rcs) as stemp_rcs_diff,AVG(stemp_oas1-ftemp_oas1) as stemp_oas1_diff,AVG(stemp_oas2-ftemp_oas2) as stemp_oas2_diff  " \
          " ,AVG(tent_rtf1) as tent_rtf1 ,AVG(tent_rtf2) as tent_rtf2 , AVG(tent_sf) as tent_sf ,AVG(tent_scs) as tent_scs , AVG(tent_rcs) as tent_rcs ,AVG(tent_oas1) as tent_oas1 ,AVG(tent_oas2) as tent_oas2 ,AVG(tent_fcs) as tent_fcs ,AVG(tent_wq) as tent_wq " \
          " from BGTASOC512.C5122007 where outcoil in (%s) group by outcoil" % (r)
    df_new1 = __query(db_conn_sts, sql)
    if df_new1 is None:
        return
    df_0 = pd.merge(df_0, df_new1, on=['OUTCOIL'], how='left')

    # Steel-grade code from quality-management info.
    sql = " select EXIT_MAT_NO as OUTCOIL,SG_CODE " \
          " from BGTAMSZZQM.T_DWD_FACT_ZZQM_MM01_INFO where UNIT_CODE='C512' and EXIT_MAT_NO in (%s) " % (r)
    df_new2 = __query(db_conn_mpp3, sql)
    if df_new2 is None:
        return
    df_0 = pd.merge(df_0, df_new2, on=['OUTCOIL'], how='left')

    # Material tree, exit side: previous unit code + tracking number.
    sql = " select EXIT_MAT_NO as OUTCOIL,left(right(WP_UNIT_ROUTE_COMBINE_DESC,9),4) as PRE_UNIT_CODE,MAT_TRACK_NO " \
          " from BGTAMSZZQM.T_DWD_FACT_ZZQM_MATERIAL_TREE where UNIT_CODE='C512' and EXIT_MAT_NO in (%s) " % (r)
    df_new4 = __query(db_conn_mpp, sql)
    if df_new4 is None:
        return
    df_0 = pd.merge(df_0, df_new4, on=['OUTCOIL'], how='left')

    # Theoretical throughput in t/h: speed(m/min)*60 * thickness*width(mm) * density(7.85 g/cm3).
    df_0['HOUR_OUTPUT'] = df_0['CEN_SPEED'] * 60 * df_0['OUT_THICK'] * df_0['OUT_WIDTH'] * 7.85 / 1000000

    # Material tree, entry side: previous process end time.
    sql = " select EXIT_MAT_NO as ENTCOIL,MAT_TRACK_NO,PRODUCE_END_TIME as BEF_PROC_END_TIME,UNIT_CODE as PRE_UNIT_CODE" \
          " from BGTAMSZZQM.T_DWD_FACT_ZZQM_MATERIAL_TREE where  EXIT_MAT_NO in (%s) " % (r2)
    df_new5 = __query(db_conn_mpp, sql)
    if df_new5 is None:
        return
    df_0 = pd.merge(df_0, df_new5, on=['MAT_TRACK_NO', 'ENTCOIL', 'PRE_UNIT_CODE'], how='left')
    df_0.drop(['MAT_TRACK_NO'], axis=1, inplace=True)

    # C502 tension/tilt averages — optional: an empty result just skips the join.
    sql = " select COILID as ENTCOIL_SHORT,AVG(ROLL_TENSION_DIFF) as ROLL_TENSION_DIFF,AVG(RACKTILTVALUE_5) as RACKTILTVALUE_5 " \
          " from BGTASOC502.T_ODS_C502_FU where  COILID in (%s) group by COILID" % (r3)
    df_new6 = __query(db_conn_sts, sql)
    if df_new6 is not None:
        df_0 = pd.merge(df_0, df_new6, on=['ENTCOIL_SHORT'], how='left')
    # Helper key is no longer needed regardless of the join outcome.
    df_0.drop(['ENTCOIL_SHORT'], axis=1, inplace=True)

    # CPC8 hot-tension samples, ordered by sample sequence.
    sql = " select outcoil as OUTCOIL,CPC8_HTP,SEQ_NO as SEQ " \
          " from BGTASOC512.C5122009 where  outcoil in (%s) order by OUTCOIL,SEQ_NO" % (r)
    df_new7 = __query(db_conn_sts, sql)
    if df_new7 is None:
        return

    # Per coil: keep the first 3 and last 3 samples as separate columns.
    inline_frames = list()
    for tmp_group_name, tmp_group_df in df_new7.groupby(by=['OUTCOIL']):
        tmp_group_df.sort_values(by='SEQ', inplace=True, ascending=True)

        if len(tmp_group_df) < 3:
            # FIXME: coils with fewer than 3 samples are silently skipped —
            # their CPC8_HTP_* columns end up NaN after the left merge.
            continue

        top3 = tmp_group_df.head(3)
        tail3 = tmp_group_df.tail(3)

        v_final = ['OUTCOIL', 'CPC8_HTP_TOP_1', 'CPC8_HTP_TOP_2', 'CPC8_HTP_TOP_3', 'CPC8_HTP_TAIL_1',
                   'CPC8_HTP_TAIL_2', 'CPC8_HTP_TAIL_3']
        tmp_dict = dict()
        tmp_dict['OUTCOIL'] = [tmp_group_name]
        tmp_dict['CPC8_HTP_TOP_1'] = [top3.iloc[0]['CPC8_HTP']]
        tmp_dict['CPC8_HTP_TOP_2'] = [top3.iloc[1]['CPC8_HTP']]
        tmp_dict['CPC8_HTP_TOP_3'] = [top3.iloc[2]['CPC8_HTP']]
        tmp_dict['CPC8_HTP_TAIL_1'] = [tail3.iloc[0]['CPC8_HTP']]
        tmp_dict['CPC8_HTP_TAIL_2'] = [tail3.iloc[1]['CPC8_HTP']]
        tmp_dict['CPC8_HTP_TAIL_3'] = [tail3.iloc[2]['CPC8_HTP']]

        inline_frames.append(pd.DataFrame(tmp_dict, columns=v_final))

    df_0 = pd.merge(df_0, pd.concat(inline_frames), on=['OUTCOIL'], how='left')

    # CPC cylinder deviations 4-7 (ZP table), per-coil extrema + head/waist/tail.
    sql = " select coilid as OUTCOIL,seq,cpc_cyl_4 as cpc_dev_4,cpc_cyl_5 as cpc_dev_5,cpc_cyl_6 as cpc_dev_6,cpc_cyl_7  as cpc_dev_7  " \
          " from BGTASOC512.T_ODS_C512_ZP where coilid in (%s) order by coilid,seq" % (r)
    df_1 = __query(db_conn_sts, sql)
    if df_1 is None:
        return
    print(df_1.columns.values)
    final_df1 = __aggregate_cpc_devs(df_1, ['CPC_DEV_4', 'CPC_DEV_5', 'CPC_DEV_6', 'CPC_DEV_7'])
    df_0 = pd.merge(df_0, final_df1, on=['OUTCOIL'], how='left')

    # CPC cylinder deviations 8-14 (FU_2 table), same treatment.
    sql = " select coilid as OUTCOIL,seq,cpc_cyl_8 as cpc_dev_8,cpc_cyl_9 as cpc_dev_9,cpc_cyl_10 as cpc_dev_10,cpc_cyl_11 as cpc_dev_11,cpc_cyl_12 as cpc_dev_12,cpc_cyl_13 as cpc_dev_13,cpc_cyl_14 as cpc_dev_14   " \
          " from BGTASOC512.T_ODS_C512_FU_2 where coilid in (%s) order by coilid,seq" % (r)
    df_2 = __query(db_conn_sts, sql)
    if df_2 is None:
        return
    print(df_2.columns.values)
    final_df2 = __aggregate_cpc_devs(
        df_2, ['CPC_DEV_8', 'CPC_DEV_9', 'CPC_DEV_10', 'CPC_DEV_11', 'CPC_DEV_12', 'CPC_DEV_13', 'CPC_DEV_14'])
    df_0 = pd.merge(df_0, final_df2, on=['OUTCOIL'], how='left')

    # Full-scale value (deviation limit) per cylinder number; used to grade
    # the absolute deviation into levels '1'..'5'.
    cyl_full_scale = {4: 150, 5: 150, 6: 150, 7: 150, 8: 177, 9: 177,
                      10: 200, 11: 150, 12: 200, 13: 150, 14: 150}
    for n, scale in cyl_full_scale.items():
        df_0['CPC_DEV_%d_MAX' % n] = df_0['CPC_DEV_%d' % n].abs()
        df_0['CPC_DEV_%d_LV' % n] = df_0['CPC_DEV_%d_MAX' % n].apply(
            lambda v, s=scale: __cal_level(v, s))

    now_1 = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    df_0['REC_CREATOR'] = 'bgtamszz00'
    df_0['REC_CREATE_TIME'] = now_1

    # The _MAX columns were only intermediates for the level grading.
    df_0.drop(['CPC_DEV_%d_MAX' % n for n in cyl_full_scale], axis=1, inplace=True)

    # Rename to the target-table column names (WAIST is called MID there).
    renames = dict()
    for n in cyl_full_scale:
        renames['CPC_DEV_%d' % n] = 'CPC_CYL_%d_MAX' % n
        renames['CPC_DEV_%d_LV' % n] = 'CPC_CYL_%d_LV' % n
        renames['CPC_DEV_%d_HEAD' % n] = 'CPC_CYL_%d_HEAD' % n
        renames['CPC_DEV_%d_WAIST' % n] = 'CPC_CYL_%d_MID' % n
        renames['CPC_DEV_%d_TAIL' % n] = 'CPC_CYL_%d_TAIL' % n
    df_0.rename(columns=renames, inplace=True)
    print(df_0)

    XRetryableSave(p_db_conn=db_conn_mpp, p_table_name='T_DWD_FACT_ZZMM_C512_CPC_RECTIFYDEVIATION', p_schema='BGTAMSZZ00',
                   p_dataframe=df_0,
                   p_max_times=5).redo()

    print('success')

    try:
        util.closeConnection(db_conn_sts)
        util.closeConnection(db_conn_mpp)
        util.closeConnection(db_conn_mpp3)
    except Exception as e:
        XLogger.getInstance().logger.error(str(e))
    print('每天03：30分执行该定时任务')
def cal_extremun_cpc_dev_n_HWT(p_outcoil=None, p_df=None, p_proprety=None):
    """Split p_df into thirds by row position and return the rounded
    100-rule extremum of column p_proprety for each third.

    The caller supplies p_df in SEQ (sample) order, so the thirds map to
    the head, waist and tail of the coil.

    :param p_outcoil: coil id, only used in error messages
    :param p_df: per-coil sample frame, ordered by SEQ
    :param p_proprety: column name to evaluate
    :return: (head, waist, tail) extrema; math.nan for a segment that
             failed (e.g. an empty slice: mean() is NaN and round() raises)
    """
    tmp_len = p_df.shape[0]
    # Split points: floor(n/3) and floor(2n/3); the tail absorbs remainders.
    a = math.floor(tmp_len * 1 / 3)
    b = math.floor(tmp_len * 2 / 3)
    segments = (
        ('h', p_df.iloc[0:a, :]),
        ('w', p_df.iloc[a:b, :]),
        ('t', p_df.iloc[b:tmp_len, :]),
    )
    results = []
    for tag, seg_df in segments:
        # Pre-assign so the error message can always be rendered — the old
        # code raised NameError in the except block when the helper call
        # itself failed before assigning these names.
        extremum = avg_100_max = avg_100_min = math.nan
        try:
            extremum, avg_100_max, avg_100_min = cal_extremum_with_100_rule(p_df=seg_df, p_proprety=p_proprety)
            results.append(round(extremum))
        except Exception as e:
            print('{} outcoil={}, proprety={}, extremum={}, avg_100_max={}, avg_100_min={}'.format(
                tag, p_outcoil, p_proprety, extremum, avg_100_max, avg_100_min),
                file=sys.stderr)
            print(str(e), file=sys.stderr)
            results.append(math.nan)

    return results[0], results[1], results[2]


def cal_extremum_with_100_rule(p_outcoil=None, p_df=None, p_proprety=None):
    """Return (extremum, top-100 average, bottom-100 average) of column
    p_proprety.

    The frame is expected to be sorted descending on p_proprety, so the
    first/last 100 rows are the largest/smallest values. The extremum is
    whichever of the two averages has the larger absolute value; on a tie
    the bottom average wins.

    :param p_outcoil: coil id (unused here, kept for a uniform signature)
    """
    # Average of the 100 largest values (head of the descending sort).
    top_avg = p_df.head(100)[p_proprety].mean()
    # Average of the 100 smallest values (tail of the descending sort).
    bottom_avg = p_df.tail(100)[p_proprety].mean()

    # Larger magnitude wins; strict '>' keeps the bottom average on ties.
    extremum = top_avg if abs(top_avg) > abs(bottom_avg) else bottom_avg
    return extremum, top_avg, bottom_avg

if __name__ == '__main__':
    start = datetime.datetime.now()

    status = main()
    # total_seconds() keeps sub-second precision and stays correct for runs
    # longer than one day (.seconds wraps at 24 hours and drops fractions).
    elapsed = (datetime.datetime.now() - start).total_seconds()
    print("Time Used 4 All ----->>>> %f seconds" % (elapsed))