#!/usr/bin/env python
# coding=utf-8

import os
import time
import csv
import re
import itertools
import xlsxwriter
import shutil
import numpy as np
import pandas as pd
import matplotlib

matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.ticker import MultipleLocator, AutoMinorLocator
from EEReportingDjangoWebLibary.Modules import Common
from scipy import stats
import statsmodels.api as sm
from collections import OrderedDict
import math
from itertools import groupby


class Pic_Info(object):
    """Record tying one rendered distribution picture to its metadata.

    Holds the station and test-item names, the lower/upper spec limits,
    the Cpk value, and the picture itself.
    """

    def __init__(self, station, item, low, upp, cpk, pic):
        # Store every constructor argument verbatim on the instance.
        self.station, self.item = station, item
        self.low, self.upp = low, upp
        self.cpk, self.pic = cpk, pic


class Parameters_Info(object):
    """Summary statistics for one test item, ready for report rendering.

    Carries counts (valid/NA/fail), the limit description, min/max/mean/std
    and the Cpu/Cpl/Cpk capability values (strings or 'NA').
    """

    def __init__(self, Valid_Data_Count, Limit_Type, Limit_High, Limit_Low, Fail, Fail_High, Fail_Low, NA_Count, Max,
                 Min, Mean, Std_Est, Cpu, Cpl, Cpk):
        # Copy each constructor argument onto a same-named attribute.
        fields = (
            ('Valid_Data_Count', Valid_Data_Count), ('Limit_Type', Limit_Type),
            ('Limit_High', Limit_High), ('Limit_Low', Limit_Low),
            ('Fail', Fail), ('Fail_High', Fail_High), ('Fail_Low', Fail_Low),
            ('NA_Count', NA_Count), ('Max', Max), ('Min', Min),
            ('Mean', Mean), ('Std_Est', Std_Est),
            ('Cpu', Cpu), ('Cpl', Cpl), ('Cpk', Cpk),
        )
        for attr_name, attr_value in fields:
            setattr(self, attr_name, attr_value)


def judgePassOrFail_old(newItemData, n, bins, CPK, CPU, CPL, uplimit='NA', lowlimit='NA'):
    """Legacy judgement of a test item's distribution: 'P', 'W' or 'F'.

    Combines spec-limit violations, the caller-supplied Cpk/Cpu/Cpl values,
    several histogram-shape heuristics and the overlap (IOU) between the
    data's kernel density estimate and a fitted normal curve.

    :param newItemData: sequence of numeric measurements for one item
    :param n: histogram bar heights (parallel to *bins*)
    :param bins: histogram bin edges
    :param CPK: pre-computed Cpk (used when both limits are present)
    :param CPU: pre-computed Cpu (used when only the upper limit is present)
    :param CPL: pre-computed Cpl (used when only the lower limit is present)
    :param uplimit: upper spec limit, or 'NA'/'N/A' when absent
    :param lowlimit: lower spec limit, or 'NA'/'N/A' when absent
    :return: 'F' when data exceeds a limit, 'P' when the distribution
             looks healthy, otherwise the default 'W' (warning)
    """

    def normalization(data):
        # Scale the density values so they sum to 1 (used for the IOU).
        _sum = sum(data)
        data = data / _sum
        return data

    niu1 = np.mean(newItemData)  # mean
    sigma1 = np.std(newItemData)  # standard deviation
    skew1 = stats.skew(newItemData)  # skewness

    # =================Remove noise from newItemData=====================
    # For heavily skewed (or limit-less) data, widen a window around the
    # mean until it keeps > 95% of the points; the shape analysis below
    # then runs on that trimmed subset.
    bin = (max(newItemData) - min(newItemData)) / 30.0  # width of one of 30 groups
    if ((skew1 > 1.0 or skew1 < -1.0) and bin != 0) or (
            uplimit in ['NA', 'N/A'] and lowlimit in ['NA', 'N/A'] and bin != 0):
        for x in range(1, 30):
            GoodData = []
            for d in newItemData:
                if d > niu1 - x * bin and d < niu1 + x * bin:
                    GoodData.append(d)
            if float(len(GoodData)) / len(newItemData) > 0.95:
                ItemData = np.array([float(x) for x in GoodData])
                break
            # kkkk: fallback so ItemData is always assigned
            if x == 29:
                ItemData = np.array([float(x) for x in newItemData])
    else:
        ItemData = np.array([float(x) for x in newItemData])
    # ===================================================================

    # k, p = stats.normaltest(ItemData)

    niu = np.mean(ItemData)
    sigma = np.std(ItemData)
    skew = stats.skew(ItemData)
    if max(newItemData) == min(newItemData):
        # Constant data: force degenerate statistics.
        niu1 = min(newItemData)
        sigma = 0.0
        skew = 0.0

    # Classify histogram bars: n_index = bars within +/- 2 sigma of the
    # trimmed mean; extra_index0/1 = bars beyond -/+ 4 sigma (stray clusters).
    n_index = []
    extra_index0 = []
    extra_index1 = []
    n_data = []
    small_count = 0
    extra_count = 0
    max_n = max(n)
    max_value_index = 0
    for i, b in enumerate(bins):
        if b >= niu - 2 * sigma and b <= niu + 2 * sigma:
            n_index.append(i)
        elif b < niu - 4 * sigma:
            extra_index0.append(i)
        elif b > niu + 4 * sigma:
            extra_index1.append(i)
    extra_index = extra_index0[0:-1]
    extra_index.extend(extra_index1)
    # small_rate: share of central bars that are short (<= 20% of the
    # tallest bar); extra_count: noticeable bars far from the centre.
    for id, dt in enumerate(n):
        if dt == max_n:
            max_value_index = id
        if id in n_index[0:-1]:
            n_data.append(dt)
            if dt <= max_n * 0.20:
                small_count = small_count + 1
        elif id in extra_index and n_index:
            if (min(n_index) > 10 or max(n_index) < 20) and dt > max_n * 0.025:
                extra_count = extra_count + 1
    small_rate = small_count / float(len(n_data)) if n_data else 0
    nonZero_Count = len([x for x in n if x != 0])

    # Check the histogram rises monotonically up to the tallest bar and
    # falls monotonically after it (ignoring the stray clusters).
    normal_count = 0
    normal_range = 0
    if max_value_index > 1 and max_value_index < 28:
        for idx, dta in enumerate(n):
            before_dt = n[idx - 1] if idx else 0
            if idx not in extra_index:
                normal_range = normal_range + 1
                if idx <= max_value_index and dta >= before_dt:
                    normal_count = normal_count + 1
                elif idx >= max_value_index and dta <= before_dt:
                    normal_count = normal_count + 1

    # IOU: overlap between the data's KDE and a normal curve with the same
    # mean/std — the closer to 1, the closer the data is to normal.
    kde = sm.nonparametric.KDEUnivariate(ItemData)
    kde.fit()  # Estimate the densities
    # plt.plot(kde.support, kde.density, lw=1, label='KDE from samples', zorder=10, color="y")
    norm_values = stats.norm.pdf(x=kde.support, loc=niu, scale=sigma)
    # plt.plot(kde.support, norm_values, lw=1, label='Norm distribution', zorder=15, color="r")
    Kde_density = normalization(kde.density)
    Norm_values = normalization(norm_values)
    IOU_min = sum(list(map(lambda x: min(x[0], x[1]), zip(Kde_density, Norm_values))))
    IOU_max = sum(list(map(lambda x: max(x[0], x[1]), zip(Kde_density, Norm_values))))
    IOU = IOU_min / IOU_max
    # kkkk: added the over-limit judgement below
    Judge = 'W'
    # cpk = 0.0
    # cpl = 0.0
    # cpu = 0.0
    if uplimit not in ['NA', 'N/A'] and lowlimit not in ['NA', 'N/A']:
        UP_limit = float(uplimit)
        LOW_limit = float(lowlimit)
        if max(newItemData) > UP_limit or min(newItemData) < LOW_limit:
            Judge = 'F'
        else:
            # CPU = (USL - niu1) / (3 * sigma1) if sigma > 0 else float('inf')
            # CPL = (niu1 - LSL) / (3 * sigma1) if sigma > 0 else float('inf')
            # CPK = min(CPU, CPL)
            # cpu = str(round(CPU, 2))
            # cpl = str(round(CPL, 2))
            # cpk = str(round(CPK, 2))
            # beyond_Spec_count = len([data for data in newItemData if data < LOW_limit or data > UP_limit])
            # all_rate: spread of the data relative to the spec window.
            all_rate = (max(newItemData) - min(newItemData)) / (
                    UP_limit - LOW_limit) if UP_limit != LOW_limit else float(
                'inf')

            if UP_limit == LOW_limit:
                if nonZero_Count <= 5:
                    Judge = 'P'
            elif UP_limit != LOW_limit:
                # Deviate_low/up: how centred the mean is inside the limits.
                Deviate_low = (niu1 - LOW_limit) / (UP_limit - LOW_limit)
                Deviate_up = (UP_limit - niu1) / (UP_limit - LOW_limit)
                if CPK >= 1.67 and not extra_count and (small_rate <= 0.5 or (all_rate < 0.33 and small_rate <= 0.7)):
                    if normal_range and normal_count == normal_range:
                        Judge = 'P'
                    elif IOU >= 0.65:
                        Judge = 'P'
                if nonZero_Count <= 5:
                    Judge = 'P'
                elif Deviate_low < 0.125 or Deviate_up < 0.125:
                    Judge = 'W'

    elif uplimit not in ['NA', 'N/A'] and lowlimit in ['NA', 'N/A']:
        if max(newItemData) > float(uplimit):
            Judge = 'F'
        # UP_limit = float(uplimit)
        # LSL = niu1 - 4 * sigma1
        # CPU = (USL - niu1) / (3 * sigma1) if sigma > 0 else float('inf')
        # CPL = (niu1 - LSL) / (3 * sigma1) if sigma > 0 else float('inf')
        # CPK = min(CPU, CPL)
        # cpu = str(round(CPU, 2))
        # cpl = 'NA'
        # cpk = 'NA'
        # beyond_Spec_count = len([data for data in newItemData if data > UP_limit])
        else:
            if CPU >= 1.67 and not extra_count and small_rate <= 0.5:
                if normal_range and normal_count == normal_range:
                    Judge = 'P'
                elif IOU >= 0.65:
                    Judge = 'P'
            if nonZero_Count <= 5:
                Judge = 'P'

    elif uplimit in ['NA', 'N/A'] and lowlimit not in ['NA', 'N/A']:
        if min(newItemData) < float(lowlimit):
            Judge = 'F'
        # USL = niu1 + 4 * sigma1
        # LOW_limit = float(lowlimit)
        # CPU = (USL - niu1) / (3 * sigma1) if sigma > 0 else float('inf')
        # CPL = (niu1 - LSL) / (3 * sigma1) if sigma > 0 else float('inf')
        # CPK = min(CPU, CPL)
        # cpu = 'NA'
        # cpl = str(round(CPL, 2))
        # cpk = 'NA'
        # beyond_Spec_count = len([data for data in newItemData if data < LOW_limit])
        else:
            if CPL >= 1.67 and not extra_count and small_rate <= 0.5:
                if normal_range and normal_count == normal_range:
                    Judge = 'P'
                elif IOU >= 0.65:
                    Judge = 'P'
            if nonZero_Count <= 5:
                Judge = 'P'

    else:
        # No limits at all: judge purely on histogram shape and IOU.
        # USL = niu1 + 4 * sigma1
        # LSL = niu1 - 4 * sigma1
        # CPU = (USL - niu1) / (3 * sigma1) if sigma > 0 else float('inf')
        # CPL = (niu1 - LSL) / (3 * sigma1) if sigma > 0 else float('inf')
        # CPK = min(CPU, CPL)
        # cpu = 'NA'
        # cpl = 'NA'
        # cpk = 'NA'
        if not extra_count and small_rate <= 0.5:
            if normal_range and normal_count == normal_range:
                Judge = 'P'
            elif IOU >= 0.80:
                Judge = 'P'
        if nonZero_Count <= 5:
            Judge = 'P'

    return Judge


def judgePassOrFail(newItemData, n, bins, CPK, CPU, CPL, uplimit='NA', lowlimit='NA'):
    """Judge a test item's distribution as 'P' (pass), 'W' (warn) or 'F' (fail).

    Fails on spec-limit violations (or very poor capability with high
    spread); warns when capability is marginal or when the histogram
    looks multi-modal, poorly normal (low IOU) or has a tail; otherwise
    passes.

    :param newItemData: sequence of numeric measurements for one item
    :param n: histogram bar heights (parallel to *bins*)
    :param bins: histogram bin edges
    :param CPK: pre-computed Cpk (used when both limits are present)
    :param CPU: pre-computed Cpu (upper-limit-only case)
    :param CPL: pre-computed Cpl (lower-limit-only case)
    :param uplimit: upper spec limit, or 'NA'/'N/A' when absent
    :param lowlimit: lower spec limit, or 'NA'/'N/A' when absent
    :return: 'P', 'W' or 'F'
    """

    def normalization(data):
        # Scale the density values so they sum to 1.
        _sum = sum(data)
        data = data / _sum
        return data

    def calculate_IOU(ItemData, avg, std):
        '''
        IOU: intersection-over-union (overlap ratio) between the sample's
        KDE and a normal curve with the same mean and std.
        :param ItemData: data used for plotting
        :param avg: mean
        :param std: standard deviation
        :return: IOU
        '''

        kde = sm.nonparametric.KDEUnivariate(ItemData)
        kde.fit()  # Estimate the densities
        # plt.plot(kde.support, kde.density, lw=1, label='KDE from samples', zorder=10, color="y")
        norm_values = stats.norm.pdf(x=kde.support, loc=avg, scale=std)
        # plt.plot(kde.support, norm_values, lw=1, label='Norm distribution', zorder=15, color="r")
        Kde_density = normalization(kde.density)
        Norm_values = normalization(norm_values)
        IOU_min = sum(list(map(lambda x: min(x[0], x[1]), zip(Kde_density, Norm_values))))
        IOU_max = sum(list(map(lambda x: max(x[0], x[1]), zip(Kde_density, Norm_values))))
        IOU = IOU_min / IOU_max
        return IOU

    def calculate_multimodal(n, bins, avg, std):
        '''
        Detect whether the histogram looks multi-modal (extra peaks).
        :param n: count in each bin
        :param bins: bin edges
        :param avg: mean
        :param std: standard deviation
        :return: (peak flag, indices of bins holding the bulk of the data)
        '''

        # primary_index: bars within +/- 2 std of the mean;
        # extra_index0/1: bars beyond -/+ 4 std (stray clusters).
        primary_index = []
        extra_index0 = []
        extra_index1 = []
        max_n = max(n)
        max_n_idx = np.argmax(n)  # index of the tallest bar
        for i, bs in enumerate(bins):
            if bs >= avg - 2 * std and bs <= avg + 2 * std:
                primary_index.append(i)
            elif bs < avg - 4 * std:
                extra_index0.append(i)
            elif bs > avg + 4 * std:
                extra_index1.append(i)
        extra_index = extra_index0[0:-1]
        extra_index.extend(extra_index1)

        # Scan all bars, counting "small" bars outside the primary region,
        # non-monotonic bars (abnormalList) and big jumps (abnormalBins).
        allRegion_idx = []
        focusRegion_idx = []
        abnormalList = []
        abnormalBins = 0
        small_bins = 0
        for idx, n_value in enumerate(n):
            if n_value > 0:
                allRegion_idx.append(idx)
            if n_value >= max_n * 0.001:
                focusRegion_idx.append(idx)
            if not idx in primary_index[0:-1]:
                if n_value <= max_n * 0.10 and n_value > max_n * 0.007:
                    small_bins = small_bins + 1
                if idx <= max_n_idx and n_value < n[idx - 1]:
                    abnormalList.append(idx)
                elif idx > max_n_idx and n_value > n[idx - 1]:
                    abnormalList.append(idx)
            if not idx in extra_index:
                if idx <= max_n_idx:
                    before_value = n[idx - 1]
                    if n_value < before_value and (before_value - n_value > max_n * 0.1):
                        abnormalBins = abnormalBins + 1
                elif idx > max_n_idx:
                    before_value = n[idx - 1]
                    if n_value > before_value and (n_value - before_value > max_n * 0.1):
                        abnormalBins = abnormalBins + 1
        # Declare a peak when there are many jumps / many small bars, or a
        # run of 3+ consecutive non-monotonic bars.
        peak = False
        smallbins_rate = small_bins / float(max(allRegion_idx) - min(allRegion_idx))
        if abnormalBins > 6 or smallbins_rate >= 0.38:
            peak = True
        else:
            fun = lambda x: x[1] - x[0]
            for k, g in groupby(enumerate(abnormalList), fun):
                Continuous = [j for i, j in g]  # a run of consecutive indices
                if len(Continuous) > 2:
                    peak = True

        return peak, focusRegion_idx

    def calculate_tail(focusRegion_idx, bins, avg):
        '''
        Detect a small "tail" on the distribution (mean pushed close to one
        edge of the occupied bin range).
        :param focusRegion_idx: indices of the bins holding the main data
        :param bins: histogram bin edges
        :param avg: mean
        :return: True when a tail is present
        '''

        tail = False
        UPP_limit = bins[max(focusRegion_idx)]
        LOW_limit = bins[min(focusRegion_idx)]
        Deviate_low = (avg - LOW_limit) / (UPP_limit - LOW_limit)
        Deviate_up = (UPP_limit - avg) / (UPP_limit - LOW_limit)

        if Deviate_low < 0.125 or Deviate_up < 0.125:
            tail = True
        return tail

    # All data is one constant value: pass as long as it is inside the limits.
    MAX_data = max(newItemData)
    MIN_data = min(newItemData)
    STD = np.std(newItemData)  # standard deviation
    Judge = 'P'
    if MAX_data == MIN_data:
        if uplimit not in ['NA', 'N/A'] and MAX_data > float(uplimit):  # has an upper limit
            Judge = 'F'
        if lowlimit not in ['NA', 'N/A'] and MIN_data < float(lowlimit):
            Judge = 'F'
        return Judge

    else:
        ItemData = np.array([float(x) for x in newItemData])
        avg = np.mean(ItemData)
        std = np.std(ItemData)
        # MAX_data = max(ItemData)
        # MIN_data = min(ItemData)

        # nonZero_Count = len([x for x in n if x != 0])  # number of non-zero bars
        peak, focusRegion_idx = calculate_multimodal(n, bins, avg, std)
        IOU = calculate_IOU(ItemData, avg, std)
        # nonZero_Count = len(focusRegion_idx)

        if uplimit not in ['NA', 'N/A'] and lowlimit not in ['NA', 'N/A']:  # both limits
            tail = calculate_tail(focusRegion_idx, bins, avg)
            UPP_limit = float(uplimit)
            LOW_limit = float(lowlimit)
            ALL_Ratio = float(MAX_data - MIN_data) / float(UPP_limit - LOW_limit) if UPP_limit != LOW_limit else 0
            # Deviate_low = (avg - LOW_limit) / (UPP_limit - LOW_limit)
            # Deviate_up = (UPP_limit - avg) / (UPP_limit - LOW_limit)
            if UPP_limit == LOW_limit:
                Judge = 'F'
            elif (MAX_data > float(uplimit)) or (MIN_data < float(lowlimit)) or (STD > 10 and CPK < 1.33):
                Judge = 'F'
            elif CPK < 1.67:
                Judge = 'W'
            elif ALL_Ratio > 0.21 and (peak or tail):
                Judge = 'W'
            else:
                pass
        elif uplimit not in ['NA', 'N/A'] and lowlimit in ['NA', 'N/A']:  # upper limit only
            tail = calculate_tail(focusRegion_idx, bins, avg)
            UPP_limit = float(uplimit)
            LOW_limit = MIN_data
            ALL_Ratio = float(MAX_data - MIN_data) / float(UPP_limit - LOW_limit) if UPP_limit != LOW_limit else 0
            if (MAX_data > float(uplimit)) or (STD > 10 and CPU < 1.33):
                Judge = 'F'
            elif CPU < 1.67:
                Judge = 'W'
            elif ALL_Ratio > 0.25 and (peak or IOU < 0.65 or tail):
                Judge = 'W'
            else:
                pass
        elif uplimit in ['NA', 'N/A'] and lowlimit not in ['NA', 'N/A']:  # lower limit only
            tail = calculate_tail(focusRegion_idx, bins, avg)
            UPP_limit = MAX_data
            LOW_limit = float(lowlimit)
            ALL_Ratio = float(MAX_data - MIN_data) / float(UPP_limit - LOW_limit) if UPP_limit != LOW_limit else 0
            if (MIN_data < float(lowlimit)) or (STD > 10 and CPL < 1.33):
                Judge = 'F'
            elif CPL < 1.67:
                Judge = 'W'
            elif ALL_Ratio > 0.25 and (peak or IOU < 0.65 or tail):
                Judge = 'W'
            else:
                pass
        else:  # no limits
            tail = calculate_tail(focusRegion_idx, bins, avg)
            if peak or IOU < 0.8 or tail:
                Judge = 'W'
            else:
                pass
        return Judge


def SendEmail(to_addrs, details, attachment):
    """Send the generated ATS Distribution Curve report by email.

    Attaches the report directly when it is 10 MB or smaller; otherwise
    copies it into a web-served directory and embeds a download link in
    the message body instead.

    :param to_addrs: list of recipient email addresses
    :param details: text inserted into the notification body
    :param attachment: path of the report file to deliver
    """
    import smtplib
    from email.mime.text import MIMEText
    from email.mime.multipart import MIMEMultipart
    from email.mime.application import MIMEApplication
    # NOTE(review): SMTP credentials are hard-coded here — consider moving
    # them to configuration or a secrets store.
    server = '172.28.130.15'
    username = "PSH\\BU3_ATS_SCM"
    password = "suzhouf8@gitmail"
    from_addr = "BU3_ATS_SCM@intra.pegatroncorp.com"

    filesize = os.path.getsize(attachment) / (1024 * 1024)  # size in MB
    htmlBody = "<B>Hi there,<br/><br/>Please have the ATS Distribution Curve Report generated for details as below: <br/><br/>"
    htmlBody += "        >>> The content: <br/><br/>{0}<br/><br/><<<        <br/><br/>".format(details)
    if filesize > 10:
        # Too large to attach: publish the file and mail a download link.
        root_path = os.path.dirname(os.path.dirname(attachment))
        new_filepath = os.path.join(root_path, os.path.basename(attachment))
        shutil.copyfile(attachment, new_filepath)
        url = "http://172.28.146.145/" + '/'.join(new_filepath.split(os.sep)[3:])
        # Fix: corrected the user-visible "Donwload" typo.
        htmlBody += "Download Link URL: <a href={0}>{1}</a><br/><br/><br/>".format(url, os.path.basename(attachment))
    htmlBody += "*** This is an automatically generated email, please do not reply ***<br/><br/><br/></B>"

    msg = MIMEMultipart()
    msg['Subject'] = "[ATS Data Analysis Tool][master][GenerateReportNotification]"
    msg['X-Priority'] = '1'
    msg['X-MSMail-Priority'] = 'High'
    msg['From'] = from_addr
    msg['To'] = ';'.join(to_addrs)

    msg.attach(MIMEText(htmlBody, 'html'))

    if filesize <= 10:
        # Fix: close the report file deterministically instead of leaking
        # the handle until garbage collection.
        with open(attachment, 'rb') as report_file:
            att = MIMEApplication(report_file.read())
        att.add_header('Content-Disposition', 'attachment', filename=os.path.basename(attachment))
        msg.attach(att)

    client = smtplib.SMTP()
    try:
        client.connect(server)
        client.login(username, password)
        client.sendmail(from_addr, to_addrs, msg.as_string())
    finally:
        # Fix: always release the SMTP connection, even if sending raises.
        client.close()


def calculate_Pic_Parameters(item_values, upper_limit, lower_limit, all_count, value_count):
    """Compute the summary statistics shown beside one distribution plot.

    Derives min/max/mean/std from *item_values*, counts limit violations,
    and computes Cpu/Cpl/Cpk where the corresponding limits exist.

    :param item_values: pandas Series of the item's numeric measurements
    :param upper_limit: upper spec limit, or 'NA'/'N/A' when absent
    :param lower_limit: lower spec limit, or 'NA'/'N/A' when absent
    :param all_count: total record count, including rows without a value
    :param value_count: count of rows that carry a numeric value
    :return: tuple ``(cpk, cpu, cpl, parameters_info)``; the floats remain
             0.0 for limit types where an index is not defined, and
             parameters_info is a Parameters_Info of display-ready strings
    """
    Limit_High = str(upper_limit)
    Limit_Low = str(lower_limit)

    Min = item_values.min()
    Max = item_values.max()
    Mean = item_values.mean()
    # NOTE(review): pandas .std() defaults to ddof=1 (sample std) while the
    # rest of this module uses np.std (ddof=0) — confirm this is intended.
    Std_Est = item_values.std()

    if Min == Max:
        # Constant data: force a zero std (a single value would yield NaN).
        Mean = Min
        Std_Est = 0.0

    cpu = 0.0
    cpl = 0.0
    cpk = 0.0
    if lower_limit not in ['NA', 'N/A'] and upper_limit not in ['NA', 'N/A']:
        Limit_Type = 'Lower Upper'
        LSL = float(lower_limit)
        USL = float(upper_limit)

        fail_low_count = item_values[item_values < LSL].count()
        fail_low_rate = round((float(fail_low_count) / float(all_count)) * 100, 2)
        fail_up_count = item_values[item_values > USL].count()
        fail_up_rate = round((float(fail_up_count) / float(all_count)) * 100, 2)

        # Zero-variance data is treated as infinitely capable.
        cpu = (USL - Mean) / (3 * Std_Est) if Std_Est > 0 else float('inf')
        cpl = (Mean - LSL) / (3 * Std_Est) if Std_Est > 0 else float('inf')
        cpk = min(cpu, cpl)
        Cpu = str(round(cpu, 2))
        Cpl = str(round(cpl, 2))
        Cpk = str(round(cpk, 2))
    elif lower_limit in ['NA', 'N/A'] and upper_limit not in ['NA', 'N/A']:
        Limit_Type = 'Upper'
        USL = float(upper_limit)

        fail_low_count = 0
        fail_low_rate = 0.00
        fail_up_count = item_values[item_values > USL].count()
        fail_up_rate = round((float(fail_up_count) / float(all_count)) * 100, 2)

        cpu = (USL - Mean) / (3 * Std_Est) if Std_Est > 0 else float('inf')
        Cpu = str(round(cpu, 2))
        Cpl = 'NA'
        Cpk = 'NA'
    elif lower_limit not in ['NA', 'N/A'] and upper_limit in ['NA', 'N/A']:
        Limit_Type = 'Lower'
        LSL = float(lower_limit)

        fail_low_count = item_values[item_values < LSL].count()
        fail_low_rate = round((float(fail_low_count) / float(all_count)) * 100, 2)
        fail_up_count = 0
        fail_up_rate = 0.00

        cpl = (Mean - LSL) / (3 * Std_Est) if Std_Est > 0 else float('inf')
        Cpu = 'NA'
        Cpl = str(round(cpl, 2))
        Cpk = 'NA'
    else:
        Limit_Type = 'Info Only'

        fail_low_count = 0
        fail_low_rate = 0.00
        fail_up_count = 0
        fail_up_rate = 0.00

        Cpu = 'NA'
        Cpl = 'NA'
        Cpk = 'NA'
    # Fix: removed the `limit_value` list that every branch built but
    # nothing ever read.
    Fail_High = str(fail_up_count) + ': % ' + str(fail_up_rate)
    Fail_Low = str(fail_low_count) + ': % ' + str(fail_low_rate)

    fail_count = fail_low_count + fail_up_count
    fail_rate = round((float(fail_count) / float(all_count)) * 100, 2)
    Fail = str(fail_count) + ': % ' + str(fail_rate)

    na_count = all_count - value_count
    na_rate = round((float(na_count) / float(all_count)) * 100, 2)
    NA_Count = str(na_count) + ': % ' + str(na_rate)

    Valid_Data_Count = value_count - fail_count

    parameters_info = Parameters_Info(Valid_Data_Count, Limit_Type, Limit_High, Limit_Low, Fail, Fail_High, Fail_Low,
                                      NA_Count, Max, Min, Mean, Std_Est, Cpu, Cpl, Cpk)

    return cpk, cpu, cpl, parameters_info


def Grubbs(X, alpha, upper_limit, lower_limit):
    """Outlier filtering of a pandas Series via a Grubbs-style test.

    NOTE(review): the G statistics below measure deviation from the
    *median*, not the mean, so this is a robust variant rather than the
    textbook Grubbs test — confirm that is intentional.

    When a spec limit is given, a candidate extreme is only dropped if it
    also violates that limit.  If the dropped fraction ever reaches 0.8%
    of the original data, the whole filter is abandoned and the untouched
    original Series is returned with an empty outlier list.

    :param X: pandas Series of numeric values (mutated in place via drop)
    :param alpha: significance level used for the critical threshold
    :param upper_limit: upper spec limit, or 'NA'/'N/A' when absent
    :param lower_limit: lower spec limit, or 'NA'/'N/A' when absent
    :return: (filtered Series, list of dropped index labels), or
             (original Series, []) when the filter was abandoned
    """
    original_X = X.copy(deep=True)
    N = len(X)
    # Critical threshold for a sample of size N at significance alpha.
    t_crit = lambda N: stats.t.isf(alpha / N, N - 2)
    thresh = lambda N: (N - 1.) / math.sqrt(N) * math.sqrt(t_crit(N) ** 2 / (N - 2 + t_crit(N) ** 2))
    Gmax = lambda X: (max(X) - np.median(X)) / np.std(X)
    Gmin = lambda X: (np.median(X) - min(X)) / np.std(X)

    outliers = []
    # max_sign / min_sign latch to False once the corresponding side stops
    # producing removable (limit-violating) extremes.
    max_sign = True
    min_sign = True
    G_max = Gmax(X)
    G_min = Gmin(X)
    while (max_sign and G_max > thresh(N)) or (min_sign and G_min > thresh(N)):
        MAX_idx = X.idxmax()
        MIN_idx = X.idxmin()
        if max_sign and G_max > thresh(N):
            if upper_limit not in ['NA', 'N/A']:
                # Only drop the maximum if it actually violates the limit.
                if float(upper_limit) < X[MAX_idx]:
                    outliers.append(MAX_idx)
                    X.drop(MAX_idx, inplace=True)
                else:
                    max_sign = False
            else:
                outliers.append(MAX_idx)
                X.drop(MAX_idx, inplace=True)
        else:
            max_sign = False
        if min_sign and G_min > thresh(N):
            if lower_limit not in ['NA', 'N/A']:
                # Only drop the minimum if it actually violates the limit.
                if float(lower_limit) > X[MIN_idx]:
                    outliers.append(MIN_idx)
                    X.drop(MIN_idx, inplace=True)
                else:
                    min_sign = False
            else:
                outliers.append(MIN_idx)
                X.drop(MIN_idx, inplace=True)
        else:
            min_sign = False
        G_max = Gmax(X)
        G_min = Gmin(X)
        N = len(X)

        # Abort when 0.8% or more of the data would be discarded.
        if float(len(outliers)) / len(original_X) >= 0.008:
            return original_X, []

    # Second pass: keep stripping limit-violating extremes until one full
    # iteration removes nothing (sign stays True).
    for i in range(len(X)):
        sign = True
        MAX_idx = X.idxmax()
        while Gmax(X) > thresh(len(X)) and upper_limit not in ['NA', 'N/A'] and float(upper_limit) < X[MAX_idx]:
            if float(upper_limit) < X[MAX_idx]:
                outliers.append(MAX_idx)
                X.drop(MAX_idx, inplace=True)
            else:
                break
            MAX_idx = X.idxmax()
            sign = False
            if float(len(outliers)) / len(original_X) >= 0.008:
                return original_X, []

        MIN_idx = X.idxmin()
        while Gmin(X) > thresh(len(X)) and lower_limit not in ['NA', 'N/A'] and float(lower_limit) > X[MIN_idx]:
            if float(lower_limit) > X[MIN_idx]:
                outliers.append(MIN_idx)
                X.drop(MIN_idx, inplace=True)
            else:
                break
            MIN_idx = X.idxmin()
            sign = False
            if float(len(outliers)) / len(original_X) >= 0.008:
                return original_X, []
        if sign:
            break
    if float(len(outliers)) / len(original_X) >= 0.008:
        return original_X, []
    return X, outliers


class ATSDistributionCurveReport(object):
    def __init__(self, callback=None, **kwargs):
        """No-op initializer; *callback* and **kwargs** are accepted but unused."""
        pass

    def run(self, Plist, Template, FileNames, savefileName, kwargs):
        drawbar = kwargs['drawbar']
        drawbox = kwargs['drawbox']
        drawby = kwargs['drawby']
        DataType = kwargs['DataType']
        ccAddress = kwargs['ccAddress']
        GrubbsFilter = kwargs['GrubbsFilter']
        Highlight = kwargs['Highlight']
        test_items = kwargs['test_item']
        details = kwargs['details']
        saveFilePath = os.path.splitext(savefileName)[0]
        if not os.path.exists(saveFilePath):
            os.mkdir(saveFilePath)
        issuepic = os.path.join(saveFilePath,
                                'Highlight_Issue' + '_' + time.strftime("%Y%m%d%H%M%S", time.localtime()) + '.xlsx')

        File_PATH = Common.check_same_station(FileNames)

        station_pic_list = []
        for file in File_PATH:
            SerialNumber = ''
            StartTime = ''
            Uplimit_Index = -1
            Lowlimit_Index = -1
            blank_lines = 0
            SNStartRowIndex = -1
            ItemStartColIndex = -1
            groupby = []
            all_items = []
            with open(file, errors='ignore') as f:
                reader = csv.reader(f, delimiter=',', quotechar='|')
                title = next(reader)
                station = title[0]
                version = title[1]
                for index, row in enumerate(reader):
                    if not any(row):
                        blank_lines = blank_lines + 1
                        continue
                    elif row[0] in ['Site', 'Product'] or re.match(r'Serial\s?Number', row[0]):
                        all_items = row
                        for itm in row[0:15]:
                            if itm in ['SerialNumber', 'Serial Number']:
                                SerialNumber = itm
                            elif itm in ['StartTime', 'Test Start Time', 'startTime']:
                                StartTime = itm
                            elif itm in ['Station ID', 'StationID']:
                                StationID = itm
                                groupby.append(StationID)
                            elif itm == 'BUILD_MATRIX_CONFIG':
                                BUILD_MATRIX_CONFIG = itm
                                groupby.append(BUILD_MATRIX_CONFIG)
                        if (not 'BUILD_MATRIX_CONFIG' in groupby) and ('BUILD_MATRIX_CONFIG' in row):
                            groupby.append('BUILD_MATRIX_CONFIG')
                    elif '----->' in row[0]:
                        if 'Upper Limit' in row[0]:
                            Uplimit_Index = index - blank_lines - 1
                        elif 'Lower Limit' in row[0]:
                            Lowlimit_Index = index - blank_lines - 1
                        else:
                            SNStartRowIndex = index - blank_lines
                            for i in range(1, len(row)):
                                if row[i] != '':
                                    ItemStartColIndex = i
                                    break
                    else:
                        break
            items_old = all_items[ItemStartColIndex:] if not test_items else test_items.rstrip().split(',')
            items = list(filter(lambda x: x and x in all_items,
                                map(lambda x: x.replace('"', '').replace("'", ''), items_old)))

            df_file = pd.read_csv(file, header=1, keep_default_na=True, skip_blank_lines=True, encoding='gb18030')
            df_file.rename(columns=lambda x: x.replace('"', '').replace("'", ''), inplace=True)

            All_Items = list(df_file.columns)
            first_line = [station, version] + [''] * (len(All_Items) - 2)
            df_head = pd.DataFrame(columns=All_Items)
            df_head = df_head.append(dict(zip(All_Items, first_line)), ignore_index=True)
            df_head = df_head.append(dict(zip(All_Items, All_Items)), ignore_index=True)

            df_limit = df_file.iloc[0:SNStartRowIndex]
            df_data = df_file.iloc[SNStartRowIndex::]
            df_data = df_data.dropna(subset=[SerialNumber])

            # # kkkk剔除超過d5倍且小於總數0.05的數據，並保存文件。===============================
            # # region
            # oldpath = os.path.join(stationPath, 'processed data')
            # csv_name = os.path.split(file)[-1]
            # newpath = os.path.join(oldpath, csv_name.split('.')[0])
            # if not os.path.exists(oldpath):
            #     os.makedirs(oldpath)
            # df_file.to_csv(newpath + '.csv', index=0)
            #
            # for item in items:
            #     col_value = df_data[item]
            #     upper_limit = df_file[item][Uplimit_Index]
            #     lower_limit = df_file[item][Lowlimit_Index]
            #     try:
            #         new_col_value = col_value.apply(pd.to_numeric, errors='raise')
            #     except:
            #         continue
            #     new_col_value = new_col_value.dropna(how='any')
            #     mean_k = np.mean(new_col_value)
            #     sigma_k = np.std(new_col_value)
            #
            #     try:
            #         if np.isnan(upper_limit):
            #             upper_limit = 'NA'
            #     except:
            #         pass
            #     try:
            #         if np.isnan(lower_limit):
            #             lower_limit = 'NA'
            #     except:
            #         pass
            #
            #     if upper_limit not in ['NA', 'N/A'] and lower_limit not in ['NA', 'N/A']:
            #         uplimit = float(upper_limit)
            #         lowlimit = float(lower_limit)
            #
            #         multiple5 = []
            #         for d in col_value:
            #             if uplimit > 0 and lowlimit > 0:
            #                 if float(d) > 5 * uplimit or float(d) < 1.0 / 5 * lowlimit:
            #                     multiple5.append(d)
            #             if uplimit < 0:
            #                 if float(d) > 1.0 / 5 * uplimit or float(d) < 5 * lowlimit:
            #                     multiple5.append(d)
            #             if uplimit > 0 and lowlimit < 0:
            #                 if float(d) > 5 * uplimit or float(d) < 5 * lowlimit:
            #                     multiple5.append(d)
            #         if len(multiple5) != 0 and float(len(multiple5)) / float(len(col_value)) < 0.05:
            #             for value in multiple5:
            #                 df_file[item].replace(value, '', inplace=True)
            #
            #             df_file.to_csv(newpath + '.csv', index=0)
            #
            #     elif upper_limit in ['NA', 'N/A'] and lower_limit not in ['NA', 'N/A']:
            #         lowlimit = float(lower_limit)
            #         # print(uplimit, lowlimit)
            #         multiple5 = []
            #         for d in col_value:
            #             if (mean_k + 4 * sigma_k) > 0 and lowlimit > 0:
            #                 if float(d) > 5 * (mean_k + 4 * sigma_k) or float(d) < 1.0 / 5 * lowlimit:
            #                     multiple5.append(d)
            #             if (mean_k + 4 * sigma_k) < 0 and lowlimit < 0:
            #                 if float(d) > 1.0 / 5 * (mean_k + 4 * sigma_k) or float(d) < 5 * lowlimit:
            #                     multiple5.append(d)
            #             if (mean_k + 4 * sigma_k) > 0 and lowlimit < 0:
            #                 if float(d) > 5 * (mean_k + 4 * sigma_k) or float(d) < 5 * lowlimit:
            #                     multiple5.append(d)
            #         if len(multiple5) != 0 and float(len(multiple5)) / float(len(col_value)) < 0.05:
            #             # print(len(multiple5))/float(len(col_value))
            #             for value in multiple5:
            #                 df_file[item].replace(value, '', inplace=True)
            #             df_file.to_csv(newpath + '.csv', index=0)
            #
            #     elif upper_limit not in ['NA', 'N/A'] and lower_limit in ['NA', 'N/A']:
            #         uplimit = float(upper_limit)
            #         multiple5 = []
            #         for d in col_value:
            #             if uplimit > 0 and (mean_k - 4 * sigma_k) > 0:
            #                 if float(d) > 5 * uplimit or float(d) < 1.0 / 5 * (mean_k - 4 * sigma_k):
            #                     multiple5.append(d)
            #             if uplimit < 0 and (mean_k - 4 * sigma_k) < 0:
            #                 if float(d) > 1.0 / 5 * uplimit or float(d) < 5 * (mean_k - 4 * sigma_k):
            #                     multiple5.append(d)
            #             if uplimit > 0 and (mean_k - 4 * sigma_k) < 0:
            #                 if float(d) > 5 * uplimit or float(d) < 5 * (mean_k - 4 * sigma_k):
            #                     multiple5.append(d)
            #
            #         if len(multiple5) != 0 and float(len(multiple5)) / float(len(col_value)) < 0.05:
            #             for value in multiple5:
            #                 df_file[item].replace(value, '', inplace=True)
            #             df_file.to_csv(newpath + '.csv', index=0)
            #
            #     elif upper_limit in ['NA', 'N/A'] and lower_limit in ['NA', 'N/A']:
            #         multiple5 = []
            #         for d in col_value:
            #             if (mean_k + 4 * sigma_k) > 0 and (mean_k - 4 * sigma_k) > 0:
            #                 if float(d) > float(5) * (mean_k + 4 * sigma_k) or float(d) < 1.0 / 5 * (mean_k - 4 * sigma_k):
            #                     multiple5.append(d)
            #             if (mean_k + 4 * sigma_k) < 0 and (mean_k - 4 * sigma_k) < 0:
            #                 if float(d) > float(1.0 / 5) * (mean_k + 4 * sigma_k) or float(d) < 5 * (mean_k - 4 * sigma_k):
            #                     multiple5.append(d)
            #             if (mean_k + 4 * sigma_k) > 0 and (mean_k - 4 * sigma_k) < 0:
            #                 if float(d) > float(5) * (mean_k + 4 * sigma_k) or float(d) < 5 * (mean_k - 4 * sigma_k):
            #                     multiple5.append(d)
            #
            #         if len(multiple5) != 0 and float(len(multiple5)) / float(len(col_value)) < 0.05:
            #             # print(len(multiple5))/float(len(col_value))
            #             for value in multiple5:
            #                 df_file[item].replace(value, '', inplace=True)
            #
            #             df_file.to_csv(newpath + '.csv', index=0)
            #
            # with open(newpath + '.csv', 'r') as f:
            #     read = csv.reader(f)
            #     with open(newpath + '1.csv', 'wb') as f:
            #         write = csv.writer(f)
            #         write.writerow(row0)
            #         write.writerows(read)
            # os.remove(newpath + '.csv')
            # df_file = pd.read_csv(newpath + '1.csv', header=1, keep_default_na=True, skip_blank_lines=True)
            # df_file.rename(columns=lambda x: x.replace('"', '').replace("'", ''), inplace=True)
            # df_data = df_file.iloc[SNStartRowIndex::]
            # df_data = df_data.dropna(subset=[SerialNumber])
            # # endregion
            # # ============================================================================

            if DataType == 'FirstData':
                df_Data = df_data.sort_values(by=[SerialNumber, StartTime], axis=0, ascending=True)
                df_filtered_data = df_Data.drop_duplicates(SerialNumber)
            elif DataType == 'LastData':
                df_Data = df_data.sort_values(by=[SerialNumber, StartTime], axis=0, ascending=False)
                df_filtered_data = df_Data.drop_duplicates(SerialNumber)
            else:
                df_filtered_data = df_data
            starttime = df_filtered_data[StartTime].min()
            endtime = df_filtered_data[StartTime].max()
            cuttime = starttime + '-' + endtime

            if drawbar == 'true':
                barPath = os.path.join(saveFilePath, 'bar')
                if not os.path.exists(barPath):
                    os.mkdir(barPath)
                stationPath = os.path.join(barPath, station)
                if not os.path.exists(stationPath):
                    os.mkdir(stationPath)
                shutil.copy(file, stationPath)

                for item in items:
                    df_item_values = df_filtered_data[item]
                    upper_limit = df_file[item][Uplimit_Index]
                    lower_limit = df_file[item][Lowlimit_Index]
                    try:
                        if np.isnan(upper_limit):
                            upper_limit = 'NA'
                    except:
                        pass
                    try:
                        if np.isnan(lower_limit):
                            lower_limit = 'NA'
                    except:
                        pass
                    try:
                        item_values = df_item_values.apply(pd.to_numeric, errors='raise')
                    except:
                        continue
                    all_count = df_filtered_data[SerialNumber].count()
                    item_values = item_values.dropna(how='any')
                    value_count = item_values.count()
                    if value_count > 0 and GrubbsFilter == 'true' and max(item_values) != min(item_values):
                        item_values, outliers = Grubbs(item_values, 0.05, upper_limit, lower_limit)
                        for i in outliers:
                            df_filtered_data[item][i] = ''
                        value_count = item_values.count()

                    if value_count > 0:
                        # upper_limit = df_file[item][Uplimit_Index]
                        # lower_limit = df_file[item][Lowlimit_Index]
                        cpk, cpu, cpl, parm_info = calculate_Pic_Parameters(item_values, upper_limit, lower_limit,
                                                                            all_count, value_count)
                        type_names = ['Limit Type: ', 'Limit High: ', 'Limit Low: ', 'Fail: ', 'Fail High: ',
                                      'Fail Low: ',
                                      'NA Count: ', 'Max: ', 'Min: ', 'Mean: ', 'Std Est: ',
                                      'Cpu: ', 'Cpl: ', 'Cpk: ']
                        type_values = [parm_info.Limit_Type, parm_info.Limit_High, parm_info.Limit_Low, parm_info.Fail,
                                       parm_info.Fail_High, parm_info.Fail_Low, parm_info.NA_Count,
                                       '%.2f' % parm_info.Max,
                                       '%.2f' % parm_info.Min, '%.2f' % parm_info.Mean, '%.2f' % parm_info.Std_Est,
                                       parm_info.Cpu, parm_info.Cpl, parm_info.Cpk]

                        fig = plt.figure(1, figsize=(8.80, 4.80), facecolor='#BFBFBF')
                        gs = GridSpec(9, 4)  # split the figure into a 9-row x 4-column grid

                        # area 1
                        plt.subplot(gs[:, 0])
                        plt.text(3, 15.7,
                                 'File Row Count\n %d \n Valid Data Count \n %d' % (
                                     all_count, parm_info.Valid_Data_Count),
                                 size=11, ha='center', va='center',
                                 bbox=dict(boxstyle="Round", ec='none', fc='#B2B2B2'))
                        for y, tname, value in zip(range(16), type_names, type_values):
                            color = 'red' if '%' in str(value) and float(value.split('% ')[1]) > 0 else 'black'
                            plt.text(3, 14 - y - 1, tname, ha='right', va='center', size=10)
                            plt.text(3, 14 - y - 1, value, ha='left', va='center', size=10, color=color)
                        plt.ylim(0, 17)
                        plt.xlim(0, 6)
                        plt.setp(plt.gca(), frame_on=False, xticks=(), yticks=())  # hide the axes frame and tick marks

                        # area 2
                        plt.subplot(gs[0, 1:], facecolor='#BFBFBF')
                        plt.ylim(0, 1)
                        plt.xlim(0, 1)
                        plt.setp(plt.gca(), frame_on=False, xticks=(), yticks=())
                        plt.text(0.5, 0, item, ha='center', va='center', color='blue', fontsize='14')

                        # area 3
                        ax = plt.subplot(gs[1:, 1:], facecolor='#BFBFBF')
                        ax2 = ax.twinx()
                        if upper_limit not in ['NA', 'N/A']:
                            ax.axvline(float(upper_limit), color='r')
                        if lower_limit not in ['NA', 'N/A']:
                            ax.axvline(float(lower_limit), color='r')

                        ns, edgeBin, patches = ax.hist(list(item_values), bins=30, stacked=True, edgecolor='b')
                        judge = judgePassOrFail(item_values, ns, edgeBin, cpk, cpu, cpl, upper_limit, lower_limit)

                        y_ns = np.cumsum(ns, axis=0)
                        total_count = np.sum(ns)
                        ax2.set_ylim(top=110)
                        y2_major_format = FormatStrFormatter('%0.0f%%')
                        ax2.yaxis.set_major_formatter(y2_major_format)
                        ax2_xdata = [(x2 - x1) / 2.0 + x1 for x1, x2 in zip(edgeBin, edgeBin[1:])]
                        ax2_ydata = [l / total_count * 100 for l in y_ns]
                        ax2.plot(ax2_xdata, ax2_ydata, ls='--', color='r')

                        ax.set_title(station + ':many  ' + cuttime, color='blue', fontsize='10')
                        ax.set_ylabel('Bin Count', color='blue')
                        ax.xaxis.set_minor_locator(AutoMinorLocator(5))
                        ax.yaxis.set_minor_locator(AutoMinorLocator(5))
                        plt.xticks(color='blue')
                        plt.yticks(color='blue')
                        plt.grid(axis='y')
                        fig.tight_layout()

                        if judge == 'P':
                            passplot = os.path.join(stationPath, 'pass')
                            if not os.path.exists(passplot):
                                os.mkdir(passplot)
                            nitem = item if '/' not in item else item.replace('/', '&')
                            name = os.path.join(passplot, nitem + '.png')
                            plt.savefig(name, bbox_inches="tight", facecolor='#BFBFBF')
                            plt.close()
                        else:
                            names = []
                            if judge == 'W':
                                wplot = os.path.join(stationPath, 'warning')
                                if not os.path.exists(wplot):
                                    os.mkdir(wplot)
                                nitem = item if '/' not in item else item.replace('/', '&')
                                name = os.path.join(wplot, nitem + '.png')
                                names.append(name)
                                plt.savefig(name, bbox_inches="tight", facecolor='#BFBFBF')

                            if judge == 'F':
                                failplot = os.path.join(stationPath, 'fail')
                                if not os.path.exists(failplot):
                                    os.mkdir(failplot)
                                nitem = item if '/' not in item else item.replace('/', '&')
                                name = os.path.join(failplot, nitem + '.png')
                                names.append(name)
                                plt.savefig(name, bbox_inches="tight", facecolor='#BFBFBF')

                            for gb in groupby:
                                pre_data = df_filtered_data.drop_duplicates(gb)
                                group_data = list(pre_data[gb])
                                if len(group_data) > 10 or (len(group_data) == 1 and pd.isnull(group_data[0])):
                                    continue

                                fig = plt.figure(1, figsize=(8.80, 4.80), facecolor='#BFBFBF')
                                gs = GridSpec(9, 4)  # split the figure into a 9-row x 4-column grid

                                # area 1
                                plt.subplot(gs[:, 0])
                                plt.text(3, 15.7, 'File Row Count\n %d \n Valid Data Count \n %d' % (
                                    all_count, parm_info.Valid_Data_Count), size=11, ha='center', va='center',
                                         bbox=dict(boxstyle="Round", ec='none', fc='#B2B2B2'))
                                for y, tname, value in zip(range(16), type_names, type_values):
                                    color = 'red' if '%' in str(value) and float(value.split('% ')[1]) > 0 else 'black'
                                    plt.text(3, 14 - y - 1, tname, ha='right', va='center', size=10)
                                    plt.text(3, 14 - y - 1, value, ha='left', va='center', size=10, color=color)
                                plt.ylim(0, 17)
                                plt.xlim(0, 6)
                                plt.setp(plt.gca(), frame_on=False, xticks=(), yticks=())  # hide the axes frame and tick marks

                                # area 2
                                plt.subplot(gs[0, 1:], facecolor='#BFBFBF')
                                plt.ylim(0, 1)
                                plt.xlim(0, 1)
                                plt.setp(plt.gca(), frame_on=False, xticks=(), yticks=())
                                plt.text(0.5, 0, item, ha='center', va='center', color='blue', fontsize='14')

                                # area 3
                                ax = plt.subplot(gs[1:, 1:], facecolor='#BFBFBF')
                                ax2 = ax.twinx()
                                if upper_limit not in ['NA', 'N/A']:
                                    ax.axvline(float(upper_limit), color='r')
                                if lower_limit not in ['NA', 'N/A']:
                                    ax.axvline(float(lower_limit), color='r')

                                df_list = []
                                label_value = []
                                for Key, Value in df_filtered_data.groupby(gb):
                                    df = Value[item].apply(pd.to_numeric, errors='ignore')
                                    df = df.dropna(how='any')
                                    dlist = [x for x in df]
                                    df_list.append(dlist)
                                    str_Key = Key if type(Key) == str else str(int(Key))
                                    label_value.append('x' + str(df.count()) + ' ' + str_Key)

                                ns, edgeBin, patches = ax.hist(df_list, bins=30, histtype='step', stacked=True,
                                                               fill=False, label=label_value)
                                if len(df_list) == 1:
                                    y_ns = np.cumsum(ns, axis=0)
                                else:
                                    y_ns = np.cumsum(np.sum(np.array(ns), axis=0))
                                total_count = np.sum(ns)
                                if not total_count:
                                    continue
                                ax2.set_ylim(top=110)
                                y2_major_format = FormatStrFormatter('%0.0f%%')
                                ax2.yaxis.set_major_formatter(y2_major_format)
                                ax2_xdata = [(x2 - x1) / 2.0 + x1 for x1, x2 in zip(edgeBin, edgeBin[1:])]
                                ax2_ydata = [l / total_count * 100 for l in y_ns]
                                ax2.plot(ax2_xdata, ax2_ydata, ls='--', color='r')

                                ax.legend(title=gb, bbox_to_anchor=(1.11, 0.4, 0.4, 0.102), loc=3, ncol=1,
                                          mode='expand', borderaxespad=0.0, facecolor='#bbbbbb')
                                ax.set_title(station + ':many  ' + cuttime, color='blue', fontsize='10')
                                ax.set_ylabel('Bin Count', color='blue')
                                ax.xaxis.set_minor_locator(AutoMinorLocator(5))
                                ax.yaxis.set_minor_locator(AutoMinorLocator(5))
                                plt.xticks(color='blue')
                                plt.yticks(color='blue')
                                plt.grid(axis='y')
                                fig.tight_layout()

                                grouppath = os.path.join(stationPath, gb)
                                if not os.path.exists(grouppath):
                                    os.mkdir(grouppath)
                                name1 = os.path.join(grouppath, nitem + '_' + gb + '.png')
                                plt.savefig(name1, bbox_inches="tight", facecolor='#BFBFBF')
                                plt.close()
                                names.append(name1)
                            station_pic_list.append(
                                Pic_Info(station, item, lower_limit, upper_limit, parm_info.Cpk, names))

                # Save the filtered/processed data next to the generated plots
                df_ProcessedData = pd.concat([df_head, df_limit, df_filtered_data], ignore_index=True)
                new_file = os.path.join(stationPath, station + '_After data processing.csv')
                df_ProcessedData.to_csv(new_file, sep=',', index=False, header=False)

            if drawbox == 'true':
                boxPath = os.path.join(saveFilePath, 'box')
                if not os.path.exists(boxPath):
                    os.mkdir(boxPath)
                stationPath = os.path.join(boxPath, station)
                if not os.path.exists(stationPath):
                    os.mkdir(stationPath)
                shutil.copy(file, stationPath)

                for item in items:
                    nitem = item if '/' not in item else item.replace('/', '&')
                    name = os.path.join(stationPath, nitem + '.png')
                    df_item_values = df_filtered_data[item]
                    upper_limit = df_file[item][Uplimit_Index]
                    lower_limit = df_file[item][Lowlimit_Index]
                    try:
                        if np.isnan(upper_limit):
                            upper_limit = 'NA'
                    except:
                        pass
                    try:
                        if np.isnan(lower_limit):
                            lower_limit = 'NA'
                    except:
                        pass
                    try:
                        item_values = df_item_values.apply(pd.to_numeric, errors='raise')
                    except:
                        continue
                    # item_values = df_filtered_data.apply(pd.to_numeric, errors='ignore')
                    value_count = item_values.count()
                    if value_count > 0 and GrubbsFilter == 'true' and max(item_values) != min(item_values):
                        item_values, outliers = Grubbs(item_values, 0.05, upper_limit, lower_limit)
                        for i in outliers:
                            df_filtered_data[item][i] = ''
                        value_count = item_values.count()
                    if value_count > 0:
                        df_item_values = df_filtered_data.apply(pd.to_numeric, errors='ignore')
                        df_item_values.boxplot(column=item, by=drawby if drawby != 'None' else None)
                        plt.savefig(name, bbox_inches="tight", facecolor='#BFBFBF')
                        plt.close()

                # Save the filtered/processed data next to the generated plots
                df_ProcessedData = pd.concat([df_head, df_limit, df_filtered_data], ignore_index=True)
                new_file = os.path.join(stationPath, station + '_After data processing.csv')
                df_ProcessedData.to_csv(new_file, sep=',', index=False, header=False)

        # Write the high-severity issue items (low Cpk) into an Excel workbook
        all_fail_pic = []
        if Highlight == 'true':
            if len(station_pic_list) != 0:
                workbook = xlsxwriter.Workbook(issuepic, {'nan_inf_to_errors': True})
                formatter1 = workbook.add_format({'align': 'vcenter', 'border': 1})
                formatter2 = workbook.add_format({'align': 'center', 'border': 1, 'bold': True})
                sh1 = workbook.add_worksheet('issue')
                sh1.write('B2', 'Station Name', formatter2)
                sh1.set_column(1, 1, len('Station Name') * 2.5)
                sh1.write('C2', 'Items', formatter2)
                sh1.set_column(2, 2, len('Items') * 2.5)
                sh1.write('D2', 'Lower Limit', formatter2)
                sh1.set_column(3, 3, len('Lower Limit') * 2.5)
                sh1.write('E2', 'Upper Limit', formatter2)
                sh1.set_column(4, 4, len('Upper Limit') * 2.5)
                sh1.write('F2', 'Cpk(<1.67)', formatter2)
                sh1.set_column(5, 5, len('Cpk(<1.67)') * 2.5)
                sh1.merge_range(1, 6, 1, 8, 'Distributions', formatter2)
                sh1.set_column(6, 6, 118)
                sh1.set_column(7, 7, 159)
                sh1.set_column(8, 8, 159)
                curRow1 = 2

                station_pic_list.sort(key=lambda s: (s.station))
                for k1, g1 in [(k, list(g)) for k, g in itertools.groupby(station_pic_list, lambda s: s.station)]:
                    last_row = curRow1 + len(list(g1)) - 1
                    if last_row == curRow1:
                        sh1.merge_range(curRow1, 1, last_row, 1, k1, formatter1)
                    else:
                        sh1.write_string(curRow1, 1, k1, formatter1)
                    for s_pic in g1:
                        sh1.set_row(curRow1, 350)
                        sh1.write(curRow1, 2, s_pic.item, formatter1)
                        sh1.write(curRow1, 3, s_pic.low, formatter1)
                        sh1.write(curRow1, 4, s_pic.upp, formatter1)
                        sh1.write(curRow1, 5, str(s_pic.cpk), formatter1)
                        curcol1 = 6
                        for sp in s_pic.pic:
                            sh1.insert_image(curRow1, curcol1, sp)
                            curcol1 = curcol1 + 1
                            all_fail_pic.append(sp)
                        curRow1 = curRow1 + 1
                workbook.close()

        if ccAddress and all_fail_pic:
            SendEmail(ccAddress.split(','), details, issuepic)
        shutil.make_archive(saveFilePath, 'zip', saveFilePath)
        shutil.rmtree(saveFilePath)


def main():
    """Local debug entry point: run the distribution-curve report once.

    All inputs and options below are hard-coded for developer debugging;
    edit the paths/flags to point at the CSV export and output directory
    on your machine before running. Takes no arguments and returns None;
    prints the elapsed wall-clock time when the report finishes.
    """
    # Alternate debug input, kept for convenience:
    # FileNames = [
    #     r'/Users/mac/Desktop/20190809101905/sa-flex2-Export-ID-115213002909141-2019-07-08T00_00_00-2019-08-08T23_59_59-N104-SA-FLEX2-sa-flex2_AT_1.15.4_PVT-E_00.05_25-signed.csv']
    FileNames = [
        r'/Users/mac/Desktop/D79-COMPASS-CAL - 8.csv']
    # savefileName = r'C:\Users\ATS\Desktop\MikyDistribution\debug\warning-pass_update'
    savefileName = r'/Users/mac/Desktop/ATS_DC'
    DataType = "AllData"          # 'FirstData' / 'LastData' / anything else = keep all rows
    drawbox = "false"             # draw box plots
    GrubbsFilter = 'true'         # apply Grubbs outlier filtering
    Highlight = 'true'            # build the low-Cpk issue Excel workbook
    ccAddress = ""                # comma-separated e-mail CCs; empty disables mail
    drawbar = "true"              # draw histogram/CDF distribution plots
    drawby = "None"               # box-plot group-by column, 'None' for ungrouped
    test_items = ''
    # test_items = "POSTCAL_NORM GL W_255_y_delta_25"
    details = "Products:[N104]<br/>Builds:[EVT]<br/>Lines:[]<br/>Stations:[SA-FLEX2]<br/>Cut Time:2019/03/11 16:41:00-2019/03/15 19:16:00"
    # time.clock() was deprecated in Python 3.3 and removed in 3.8;
    # time.perf_counter() is the portable high-resolution replacement.
    start_time = time.perf_counter()
    obj = ATSDistributionCurveReport()
    obj.run(None, None, FileNames, savefileName,
            dict(drawbar=drawbar, drawbox=drawbox, DataType=DataType, drawby=drawby, ccAddress=ccAddress,
                 GrubbsFilter=GrubbsFilter, Highlight=Highlight, test_item=test_items, details=details))
    print('Time = {} second'.format(time.perf_counter() - start_time))


# Run the local debug harness only when executed as a script,
# not when this module is imported by the reporting web service.
if __name__ == '__main__':
    main()
