# -*- coding: utf-8 -*-
"""
@Time ： 2023-05-18 14:26
@Author ： Jinbo CHEN
@File ：dematel.py
"""
import pandas as pd
import numpy as np
from pyanp import limitmatrix as lm


class DEMATEL:
    """Classical DEMATEL analysis.

    Derives the total-relation matrix and the prominence / net cause-effect
    indicators (R, D, P, E) from a direct-relation matrix, both globally and
    for each level-2 factor group given by ``split_sizes``.
    """

    def __init__(self, direct_relation: pd.DataFrame, split_sizes):
        """
        :param direct_relation: square direct-relation matrix Z
        :param split_sizes: sizes of the consecutive level-2 factor groups
            (their cumulative sums define the diagonal blocks of T)
        """
        # Normalized direct-relation matrix N = Z / max(row sums).
        # Plain scalar multiplication replaces the deprecated applymap.
        s = 1 / direct_relation.sum(axis=1).max()
        normalized_dr = direct_relation * s

        # Total-relation matrix T = N (I - N)^-1 = N + N^2 + N^3 + ...
        n = normalized_dr.values
        t_arr = n @ np.linalg.inv(np.eye(*n.shape) - n)
        total_relation = pd.DataFrame(t_arr, columns=normalized_dr.columns, index=normalized_dr.index)

        # Level-2 factors' RDPE values: one table per diagonal block of T.
        rdpe_list_l2 = []
        st = 0
        for ed in np.cumsum(split_sizes):
            rdpe_list_l2.append(self._rdpe(total_relation.iloc[st:ed, st:ed]))
            st = ed

        # output
        self.Z = direct_relation                # direct-relation matrix
        self.N = normalized_dr                  # normalized direct-relation matrix
        self.T = total_relation                 # total-relation matrix
        self.RDPE = self._rdpe(total_relation)  # global R/D/P/E table
        self.RDPEs = rdpe_list_l2               # per-group R/D/P/E tables

    @staticmethod
    def _rdpe(total_relation: pd.DataFrame) -> pd.DataFrame:
        """Return the R/D/P/E table for a (sub-)total-relation matrix.

        R = row sums (influence given), D = column sums (influence received),
        P = R + D (degree of prominence), E = R - D (net cause/effect).
        """
        r = total_relation.sum(axis=1)
        d = total_relation.sum(axis=0)
        # Build on the row index explicitly so column-label alignment matches
        # the original column-by-column assignment behavior.
        result = pd.DataFrame(index=total_relation.index)
        result['R'] = r
        result['D'] = d
        result['P'] = r + d
        result['E'] = r - d
        return result


class Grey:
    """Grey-number defuzzification of linguistic survey answers (CFCS-style),
    producing a crisp direct-relation matrix Z."""

    def __init__(self, gls: dict, df: pd.DataFrame):
        """
        :param gls: grey linguistic scale, mapping label -> (lower, upper) pair
        :param df:  questionnaire responses, one linguistic label per cell
        """
        # Map every linguistic label to its grey-number bounds.
        df_lower = df.applymap(lambda label: gls.get(label)[0])
        df_upper = df.applymap(lambda label: gls.get(label)[1])

        # Column-wise normalization into [0, 1]; whole-frame broadcasting
        # (DataFrame op Series aligns on column labels) replaces the original
        # per-column apply lambdas.
        span = df_upper.max() - df_lower.min()
        norm_lower = (df_lower - df_lower.min()) / span
        # NOTE(review): this normalizes against the upper matrix's own column
        # minimum; classic CFCS subtracts min(lower) here — confirm intended.
        norm_upper = (df_upper - df_upper.min()) / (df_upper.max() - df_upper.min())

        # Total normalized crisp values (Y matrix).
        Y = (norm_lower * (1 - norm_lower) + norm_upper ** 2) / ((1 - norm_lower) + norm_upper)

        # Crisp direct-relation matrix (Z); NaNs from zero denominators in Y
        # become 0.
        Z = (df_lower.min() + Y * span).fillna(.0)

        # output
        self.df_lower = df_lower
        self.df_upper = df_upper
        self.norm_lower = norm_lower
        self.norm_upper = norm_upper
        self.Y = Y
        self.Z = Z



class DANP:
    """DEMATEL-based ANP (DANP): builds the unweighted, weighted and limit
    super-matrices from level-1 and level-2 total-influence matrices.

    Relies on ``pyanp.limitmatrix.calculus`` (imported at module level as
    ``lm``) for the limit-matrix computation.
    """

    def __init__(self, df_tim_1: pd.DataFrame, df_tim_2: pd.DataFrame, split_sizes: list):
        """
        :param df_tim_1:  level 1 factors total influence matrix
        :param df_tim_2:  level 2 factors total influence matrix
        :param split_sizes: a list of size for each level 1 group
        """
        # check if split_sizes matches DataFrame size
        # (message text: "split rule does not match the DataFrame size")
        assert sum(split_sizes) == df_tim_2.shape[0] == df_tim_2.shape[1], "拆分规则与 DataFrame 大小不匹配"

        # Row-normalize every (i, j) block of T2: _apply_sperately applies
        # func with axis=1, so x is one ROW of the block and x / x.sum()
        # divides it by that block-row's sum.
        def func_norm(x, i, j):
            return x / x.sum()
        tim_normed = self._apply_sperately(df_tim_2, split_sizes, func_norm)


        # unweighted_super_matrix: transpose of the block-normalized T2, so
        # block columns (rather than rows) now sum to 1
        usm = tim_normed.T

        # weight matrix derived from T1 (each row of T1 normalized to sum 1)
        df_weights = df_tim_1.apply(lambda x: x / x.sum(), axis=1)
        def func_multiply_weight(x, i, j):
            # NOTE(review): block (i, j) of usm originates from block (j, i)
            # of tim_normed (usm is a transpose); classic DANP weights with
            # the transposed weight matrix — confirm iloc[i, j] vs iloc[j, i].
            return x * df_weights.iloc[i, j]
        # derive weighted super matrix
        wsm = self._apply_sperately(usm, split_sizes, func_multiply_weight)

        # calculating limit of weighted matrix
        # NOTE(review): the level-1 limit is taken on the RAW T1, not on a
        # normalized/weighted form — confirm this is intended.
        limit_wsm = pd.DataFrame(lm.calculus(wsm.values),  columns=df_tim_2.columns, index=df_tim_2.index)
        limit_wsm_1 = pd.DataFrame(lm.calculus(df_tim_1.values), columns=df_tim_1.columns, index=df_tim_1.index)

        self.T2_NORM = tim_normed   # block-row-normalized T2
        self.T_WEIGHT = df_weights  # row-normalized T1 (block weights)
        self.USM = usm              # unweighted super matrix
        self.WSM = wsm              # weighted super matrix
        self.LWSM = limit_wsm       # limit of the weighted super matrix
        self.LWSM1 = limit_wsm_1    # limit of T1

    def _apply_sperately(self, super_matrix: pd.DataFrame, split_sizes: list, func):
        """Split *super_matrix* into a grid of blocks per *split_sizes*,
        apply *func* row-wise (axis=1) to every block — passing the block
        coordinates ``(i, j)`` as extra args — and reassemble the blocks
        into one DataFrame in the original order.
        """
        # split by row
        df_rows = np.array_split(super_matrix, np.cumsum(split_sizes[:-1]), axis=0)
        # split by column
        df_cells = []
        for d in df_rows:
            cells = np.array_split(d, np.cumsum(split_sizes[:-1]), axis=1)
            df_cells.append(cells)

        # transform each cell; func receives one row of the cell at a time
        for i in range(len(df_cells)):
            for j in range(len(df_cells[i])):
                cell = df_cells[i][j]
                cell = cell.apply(func, args=(i, j), axis=1)
                df_cells[i][j] = cell
        # merge transformed cells (columns within a block-row, then rows)
        df_merged = pd.concat([pd.concat(df_cells[i], axis=1) for i in range(len(df_cells))])
        return df_merged



