#!/usr/local/bin/python3
# -*- coding: utf-8 -*-

"""
@File    : li.py
@Author  : Link
@Time    : 2022/5/15 9:12
@Mark    : 
"""
from typing import Dict, Union, List, Tuple

from PySide2.QtCore import Signal, QObject

import pandas as pd
import numpy as np

from data_core.li import Li
from report_core.openxl_utils.utils import OpenXl
from uitls.func import save_df_to_json, save_obj_to_pickle, get_obj_by_pickle, get_df_by_json
from workspace_core.core_to_analysis_stdf.stdf_variable import GlobalVariable


class SummaryCore:
    """
    Cache every parsed file's data, keyed by ID, in DataFrames.

    1. Each parsed STDF file yields one summary row plus its raw per-part data.
    2. The summary rows are combined into SummaryDf, sorted by start_time:

        | ID  | R | LOT_ID | SB_LOT_ID | FLOW_ID | QTY | PASS | YIELD | PASS_VERIFY | JOB_NAME | START_TIME | FINISH_TIME |

        df_dict ->
        {
            ID: per-file DataFrame (its first column is ID),
            ID: per-file DataFrame,
            ...
        }

        limit_dict ->
        {
            Only one limit is kept per test; stored as a named tuple to save
            space.
        }
    3. SummaryDf is rendered on the Tree:
        Group by: LOT_ID -> FLOW_ID ? -> SB_LOT_ID ?
            after groupby, aggregate with min(START_TIME), max(FINISH_TIME),
            sum(QTY), sum(PASS)
        The Tree receives a list of group dicts, each carrying the raw rows in
        a "children" list.
    4. The IDs picked from the Tree are merged into NowSummaryDf (plus a GROUP
       column) and NowDfs (the df_dict frames concatenated as needed).
    5. Multiple windows may aggregate the data.
    """
    summary_df = None  # type:Union[pd.DataFrame, None]
    limit_dict = None  # type: Dict[str, GlobalVariable.LimitClass]  # shared reference; tracks manual limit edits
    limit_df = None  # type:Union[pd.DataFrame, None]
    df_dict = None  # type:Union[Dict[int, pd.DataFrame], None]
    ready = False

    @staticmethod
    def _yield_str(pass_qty, qty) -> str:
        """Format pass_qty/qty as a percent string, guarding against qty == 0."""
        if qty == 0:
            return "0.0%"
        return '{}%'.format(round(pass_qty / qty * 100, 2))

    def set_data(self, data: tuple):
        """
        Unpack the backend tuple into four pieces (summary_df, limit_dict,
        df_dict, limit_df); summary_df arrives already sorted. This payload is
        important — it is meant to feed the server-side cache later.
        :param data: (summary_df, limit_dict, df_dict, limit_df)
        :return: True once the data is installed
        """
        self.summary_df, self.limit_dict, self.df_dict, self.limit_df = data
        self.ready = True
        return self.ready

    def get_summary_tree(self):
        """
        Shape summary_df for the Tree widget: one dict per LOT_ID with the
        aggregated QTY/PASS/YIELD/time span and the raw rows as children.
        :return: list of group dicts
        """
        tree_dict_list = list()
        for key, e_df in self.summary_df.groupby("LOT_ID"):  # type:str, pd.DataFrame
            key = str(key)
            qty = e_df["QTY"].sum()
            pass_qty = e_df["PASS"].sum()
            tree_dict = {"LOT_ID": key, "QTY": qty, "PASS": pass_qty,
                         "YIELD": self._yield_str(pass_qty, qty),
                         "START_TIME": e_df["START_TIME"].min(), "FINISH_TIME": e_df["FINISH_TIME"].max(),
                         "children": e_df.to_dict(orient="records")}
            tree_dict_list.append(tree_dict)

        return tree_dict_list

    def add_custom_node(self, ids: List[int], new_lot_id: str):
        """
        Merge several file IDs into one synthetic node:
        1. append a row to summary_df
        2. store the concatenated data under a fresh ID in df_dict
        3. the caller refreshes the view via get_summary_tree
        :param ids: existing IDs to merge
        :param new_lot_id: LOT_ID / SB_LOT_ID label for the merged node
        """
        if self.summary_df is None:
            return

        summary_info = self.summary_df[self.summary_df.ID.isin(ids)]
        new_id = self.summary_df["ID"].max() + 1

        data = pd.concat([self.df_dict[_id] for _id in ids])
        data.loc[:, "ID"] = new_id
        self.df_dict[new_id] = data
        qty = len(data)
        pass_qty = len(data[data["FAIL_FLAG"] == 1])  # FAIL_FLAG == 1 means PASS
        # NOTE(review): the positional row below must match summary_df's column
        # order (ID..YIELD) — confirm if that schema ever changes.
        self.summary_df.loc[len(self.summary_df.index)] = [
            new_id,  # ID
            '~',  # R
            'all',  # PART_FLAG
            new_lot_id,  # LOT_ID
            new_lot_id,  # SB_LOT_ID
            summary_info.SETUP_TIME.min(),  # SETUP_TIME
            summary_info.START_TIME.min(),  # START_TIME
            summary_info.FINISH_TIME.max(),  # FINISH_TIME
            'all',  # FLOW_ID
            'all',  # PART_TYPE
            'all',  # JOB_NAME
            '~',  # TEMPERATURE
            qty,  # QTY
            pass_qty,  # PASS
            self._yield_str(pass_qty, qty),  # YIELD
        ]

    def get_df(self, ids: List[int], quick: bool = False, sample_num: int = 10000):
        """
        Return the concatenated raw data plus the matching summary rows; no
        computation is done here.
        :param ids: file IDs to pull from df_dict
        :param quick: when True, down-sample any frame longer than sample_num
        :param sample_num: per-file row cap used in quick mode
        :return: (df, now_summary_df)
        """
        if quick:
            df_list = []
            for _id in ids:
                t_df = self.df_dict[_id]
                if len(t_df) > sample_num:
                    # DataFrame.sample requires an integer row count.
                    df_list.append(t_df.sample(int(sample_num)))
                    continue
                df_list.append(t_df)
        else:
            df_list = [self.df_dict[_id] for _id in ids]

        df = pd.concat(df_list)
        df.rename(columns={"0:nan": "0:None"}, inplace=True)
        # BUG FIX: reset_index returns a new frame; the result used to be discarded.
        df = df.reset_index(drop=True)
        now_summary_df = self.summary_df[self.summary_df.ID.isin(ids)]
        now_summary_df = now_summary_df.reset_index(drop=True)
        return df, now_summary_df

    def show_limit_diff(self):
        """
        Open Excel to display the differences between limits.
        TODO:
            1. first link each single STDF's data with the summary df
            2. refine per requirements
        """
        OpenXl.excel_limit_run(self.summary_df, self.limit_df)


class DfCore(Li):
    """
    Computations over df_group & front_df_group as well as front_df.

    TODO: to support a PAT mode we would need:
        1. a way to obtain LSL and USL in PAT mode (first die?)
        2. dynamic UL and LL acquisition
    """
    summary_df = None  # type:pd.DataFrame # this one does not need to be modified

    limit_dict = None  # type: Union[Dict[str, GlobalVariable.LimitClass], None]

    front_limit_dict = None  # type: Union[Dict[str, GlobalVariable.LimitClass], None]  # editable working copy of limit_dict
    group_cpk_dict = None  # type: Union[Dict[str, Dict[str, object]], None]

    cpk_dict = None  # type: Union[Dict[str, Dict[str, object]], None]  # fast lookup of a test's limit & stats by key
    cpk_list = None  # type:list # rows displayed on the Table
    calculation_cpk_signal = Signal(object, object)

    # Attribute-name groups used by save_to_obj / read_by_obj persistence.
    save_file_json = ["df", "front_df", "chart_df", "summary_df", ]
    save_file_objs = ["cpk_dict", "cpk_list"]
    save_file_limit_class = ["limit_dict", "front_limit_dict", ]

    group_params = None
    da_group_params = None
    process_top_row = ("yield", "avg_contact", "cpk_contact", "std_contact")
    process_bot_row = None

    def set_data(self, data: tuple):
        """Unpack the (df, summary_df) tuple from the backend and seed the default grouping."""
        new_df, new_summary_df = data
        self.set_new_df(new_df)
        self.summary_df = new_summary_df
        self.set_front_df_group(None, None)

    def set_limit_dict(self, limit_dict):
        """Install the authoritative limit dict and refresh the editable working copy."""
        self.limit_dict, self.front_limit_dict = limit_dict, limit_dict.copy()

    def set_front_df_group(self, group_params: Union[list, None], da_group_params: Union[list, None]):
        """
        Rebuild the GROUP column on summary_df and the DA_GROUP column on df,
        then merge them into a fresh front df.

        JMP-style subdivision differs from the internal chart subdivision (the
        internal charting is the weaker one).
        :param group_params: columns for the coarse (JMP plot) grouping
        :param da_group_params: columns for the fine (JMP plot) grouping
        :return: True when the groups were rebuilt, False when data is missing
        """
        if self.summary_df is None or self.df is None:
            return False
        self.group_params, self.da_group_params = group_params, da_group_params

        def _joined(frame, params, fallback):
            # Concatenate the selected columns as "a|b|..."; fall back to a
            # constant label when no grouping columns were requested.
            if params is None:
                return fallback
            combined = None
            for pos, column in enumerate(params):
                as_text = frame[column].astype(str)
                combined = as_text if pos == 0 else combined + "|" + as_text
            return combined

        self.summary_df.loc[:, "GROUP"] = _joined(self.summary_df, group_params, 'ALL')
        self.df.loc[:, "DA_GROUP"] = _joined(self.df, da_group_params, '*')
        new_front_df = pd.merge(self.summary_df[["ID", "GROUP"]], self.df, how="inner")
        self.set_new_front_df(new_front_df, emit=False)
        return True

    def calculation_cpk(self):
        """
        Compute the CPK and fail-rate table for every test in the working limits.

        Emits calculation_cpk_signal(cpk_list, cpk_dict) on success; returns
        early (emitting a user message) when there is no usable data.
        :return: None
        """
        if self.front_df is None:
            return
        if len(self.front_df) == 0:
            self.message.emit("无数据, 可用分析数据Qty==0, 无法解析@")
            return
        self.cpk_list = []
        cpk_dict = dict()

        # Top-fail tally: count FIRST_FAIL occurrences among failing parts
        # (FAIL_FLAG != 1 means FAIL).
        """top fail计算"""
        first_fail_dict = {}
        for num in self.front_df[self.front_df["FAIL_FLAG"] != 1]["FIRST_FAIL"]:
            if num not in first_fail_dict:
                first_fail_dict[num] = 1
                continue
            first_fail_dict[num] += 1

        # Statistics (mean/min/max/std/median) are taken over PASSING parts only.
        temp_df = self.front_df[self.front_df["FAIL_FLAG"] == 1][self.front_limit_dict.keys()]
        _mean, _min, _max, _std, _median = temp_df.mean(), temp_df.min(), temp_df.max(), temp_df.std(), temp_df.median()
        for key, item in self.front_limit_dict.items():
            try:
                # Keys are formatted "TEST_NO:TEST_TEXT".
                test_num, test_txt = key.split(":", 1)
            except ValueError:
                raise Exception("重大错误: 测试数据测试项目没有指定TEST_NO和TEST_TEXT,测试程序漏洞@!!!!!")
            reject_qty = 0
            # Build out-of-limit masks for whichever comparison types apply;
            # an empty list leaves reject_qty at 0 (no usable limit types).
            logic_or = []
            if item.l_limit_type == ">":
                logic_or.append((self.front_df[key] <= item.l_limit))
            if item.l_limit_type == ">=":
                logic_or.append((self.front_df[key] < item.l_limit))
            if item.h_limit_type == "<":
                logic_or.append((self.front_df[key] >= item.h_limit))
            if item.h_limit_type == "<=":
                logic_or.append((self.front_df[key] > item.h_limit))
            if len(logic_or) == 1:
                items = logic_or[0]
                reject_qty = len(self.front_df.loc[items])
            if len(logic_or) > 1:
                items = np.logical_or(*logic_or)
                reject_qty = len(self.front_df.loc[items])

            fail_qty = first_fail_dict.get(item.test_num, 0)
            # Standard two-sided Cpk: min((USL-mean), (mean-LSL)) / (3*std),
            # computed from pass-only stats; 0 when std is 0.
            temp_std, temp_mean = _std[key], _mean[key]
            cpk = 0 if temp_std == 0 else round(min([(item.h_limit - temp_mean) / (3 * temp_std),
                                                     (temp_mean - item.l_limit) / (3 * temp_std)]), 6)
            # One row of the CPK table; "Fail" counts parts whose FIRST fail
            # was this test, "Reject" counts all parts outside this limit.
            temp_dict = {
                "SORT": item.test_sort,
                "TEST_NUM": test_num,
                "TEST_TXT": test_txt,
                "UNITS": str(item.unit),
                "LO_LIMIT": item.l_limit,
                "HI_LIMIT": item.h_limit,
                "Average": round(_mean[key], 6),
                "Stdev": round(_std[key], 6),
                "Cpk": cpk,
                "Text": key,
                "Total": len(self.front_df),
                "Fail": fail_qty,
                "Fail/Total": "{}%".format(round(fail_qty / len(self.front_df) * 100, 3)),
                "Reject": reject_qty,
                "Reject/Total": "{}%".format(round(reject_qty / len(self.front_df) * 100, 3)),
                "Min": round(_min[key], 6),
                "Max": round(_max[key], 6),
                "LO_LIMIT_TYPE": item.l_limit_type,
                "HI_LIMIT_TYPE": item.h_limit_type,
            }
            self.cpk_list.append(temp_dict)
            cpk_dict[key] = temp_dict
        self.cpk_dict = cpk_dict
        self.calculation_cpk_signal.emit(self.cpk_list, self.cpk_dict)

    def restore_limit(self):
        """Discard any manual limit edits and recompute with the original limits."""
        self.front_limit_dict = dict(self.limit_dict)
        self.calculation_new_cpk()

    def new_limit(self, limit_new: Dict[str, Tuple[float, float, str, str]], only_pass: bool = False) -> bool:
        """
        Replace the working limits for the given tests and recompute yield/CPK.

        Limits are keyed by "test_num:test_text"; each value is
        (low_limit, high_limit, low_type, high_type). The DataFrame is
        re-graded against the new limits afterwards.
        :param limit_new: new limits per test key
        :param only_pass: when True, keep only passing rows after the re-grade
        :return: False when there is no data to work on, True otherwise
        """
        if self.front_df is None:
            return False
        for key, each in limit_new.items():
            limit_class = self.front_limit_dict[key]  # type:GlobalVariable.LimitClass
            # Rebuild the record keeping identity fields and swapping in the
            # new bounds / comparison types (field order matches the
            # construction used elsewhere in this file).
            new_limit_class = GlobalVariable.LimitClass(
                limit_class.test_text,
                limit_class.test_num,
                each[0],
                each[1],
                limit_class.unit,
                limit_class.test_sort,
                each[2],
                each[3],
            )
            self.front_limit_dict[key] = new_limit_class
        self.calculation_new_cpk(only_pass=only_pass)
        # BUG FIX: annotated -> bool but previously fell off the end returning
        # None; report success explicitly.
        return True

    def calculation_new_cpk(self, only_pass: bool = False):
        """
        Rebuild FAIL_FLAG / FIRST_FAIL on a fresh copy of df from the working
        limits, then recompute the CPK table. Must be triggered after any
        manual limit change.
        :param only_pass: when True, keep only rows that pass every limit
        :return: True once the recomputation ran
        """
        self.front_df = self.df.copy()
        self.front_df["FAIL_FLAG"] = 1  # 1 means PASS
        self.front_df["FIRST_FAIL"] = 0
        # Re-grade every row against the (possibly edited) working limits.
        for key, each in self.front_limit_dict.items():
            if key not in self.front_df:
                continue
            logic_or = []
            if each.l_limit_type == ">":
                logic_or.append((self.front_df[key] <= each.l_limit))
            if each.l_limit_type == ">=":
                logic_or.append((self.front_df[key] < each.l_limit))
            if each.h_limit_type == "<":
                logic_or.append((self.front_df[key] >= each.h_limit))
            if each.h_limit_type == "<=":
                logic_or.append((self.front_df[key] > each.h_limit))
            # BUG FIX: previously an empty logic_or left `items` as the plain
            # FAIL_FLAG==1 mask, so a test with no recognized limit types
            # failed every still-passing row. Skip such tests instead, which
            # matches how calculation_cpk computes reject_qty.
            if not logic_or:
                continue
            items = (self.front_df.FAIL_FLAG == 1)
            if len(logic_or) == 1:
                items = items & logic_or[0]
            else:
                items = items & np.logical_or(*logic_or)
            # Only rows that were still passing are demoted, so FIRST_FAIL
            # records the first limit (in dict order) that failed the row.
            self.front_df.loc[items, "FAIL_FLAG"] = 0
            self.front_df.loc[items, "FIRST_FAIL"] = each.test_num
        if only_pass:
            self.front_df = self.front_df[self.front_df.FAIL_FLAG == 1]
        self.calculation_cpk()
        return True

    def verify_pass_have_nan(self) -> bool:
        """ PASS rows contain NaN values! The check should locate the offending ID.
        TODO: not implemented — currently returns None despite the bool hint. """

    def verify_test_no_repetition(self) -> bool:
        """ There are duplicated TEST_NOs! The check should locate the offending ID.
        TODO: not implemented — currently returns None despite the bool hint. """

    def screen_df(self, text_column: list):
        """
        Keep only the selected test items (the base data becomes the new data):
        1. rebuild df
        2. rebuild limit_dict
        3. rebuild front_df / chart_df and recompute CPK
        :param text_column: test keys to keep
        """
        selected_limits = {name: self.front_limit_dict[name] for name in text_column}
        self.set_limit_dict(selected_limits)
        kept_columns = GlobalVariable.DF_COLUMNS + list(self.limit_dict.keys())
        narrowed = self.front_df[kept_columns]
        self.set_new_df(narrowed)
        self.set_new_front_df(narrowed)
        self.calculation_new_cpk()

    def cut_data_by_limit(self, func: str, limit_new: Dict[str, Tuple[float, float]]) -> bool:
        """
        Drop data outside the given limits from df (and hence front_df).

        NOTE(review): only the 'inner' mode (keep rows inside the bounds) is
        implemented; a mode dropping rows inside the limits appears intended
        but does not exist yet.
        :param func: cut mode; only 'inner' is supported
        :param limit_new: {test_key: (low, high)} bounds for the cut
        :return: True when a cut ran, False otherwise
        """
        if self.df is None:
            return False
        if func != 'inner':
            # BUG FIX: annotated -> bool but previously returned None here.
            return False
        for key, each in limit_new.items():
            # Remove rows outside [low, high] from the base df.
            items = ((self.df[key] < each[0]) | (self.df[key] > each[1]))
            self.df = self.df.drop(self.df.loc[items].index)
            limit_class = self.front_limit_dict[key]  # type:GlobalVariable.LimitClass
            # Tighten the working limit record to the cut bounds.
            new_limit_class = GlobalVariable.LimitClass(
                limit_class.test_text,
                limit_class.test_num,
                each[0],
                each[1],
                limit_class.unit,
                limit_class.test_sort,
                limit_class.l_limit_type,
                limit_class.h_limit_type,
            )
            self.front_limit_dict[key] = new_limit_class
        self.calculation_new_cpk()
        return True

    def pivot_front_df(self, text_column: list):
        """Pivot front_df on the selected columns. TODO: not implemented."""
        pass

    def get_df_corr(self) -> Union[pd.DataFrame, None]:
        """Correlation matrix over the working test columns, or None when no data."""
        if self.front_df is None:
            return None
        test_columns = self.front_limit_dict.keys()
        return self.front_df[test_columns].corr()

    def save_to_obj(self, file_path):
        """Persist this li object: pickled objects, json frames, and limit tuples."""
        for attr_name in self.save_file_objs:
            value = getattr(self, attr_name)  # type:object
            if value is not None:
                save_obj_to_pickle(value, file_path, attr_name)
        for attr_name in self.save_file_json:
            frame = getattr(self, attr_name)  # type:pd.DataFrame
            if frame is not None:
                save_df_to_json(frame, file_path, attr_name)
        for attr_name in self.save_file_limit_class:
            limits = getattr(self, attr_name)  # type:Dict[str, GlobalVariable.LimitClass]
            if limits is None:
                continue
            # GlobalVariable.LimitClass records are stored as plain tuples.
            as_tuples = {key: tuple(item) for key, item in limits.items()}
            save_obj_to_pickle(as_tuples, file_path, attr_name)

    def read_by_obj(self, file_path):
        """Restore this li object from its pickled / json representation."""
        for each in self.save_file_objs:
            setattr(self, each, get_obj_by_pickle(file_path, each))
        for each in self.save_file_json:
            setattr(self, each, get_df_by_json(file_path, each))
        for each in self.save_file_limit_class:
            obj = get_obj_by_pickle(file_path, each)
            if not isinstance(obj, dict):
                # BUG FIX: was `return`, which aborted the whole restore when a
                # single limit pickle was missing; skip just this attribute.
                continue
            temp_dict = dict()
            for key, item in obj.items():
                # Tuples were written by save_to_obj; rebuild the records.
                limit = GlobalVariable.LimitClass._make(item)
                temp_dict[key] = limit
            setattr(self, each, temp_dict)

    def calculation_process(self):
        """
        制程能力数据
        :return:
        """
        if self.summary_df is None:
            return False
        if self.df is None:
            return False
