import json
import math
import time

import numpy as np
import pandas as pd
from factor_analyzer import FactorAnalyzer
from scipy.stats import bartlett, chi2
from sklearn import linear_model
from sklearn.decomposition import PCA

# from factor_analyzer.factor_analyzer import calculate_kmo, calculate_bartlett_sphericity

# Standardization
def standardization(data: np.ndarray) -> np.ndarray:
    """Z-score each column: subtract the column mean, divide by the sample
    standard deviation (ddof=1)."""
    centered = data - data.mean(axis=0)
    return centered / data.std(axis=0, ddof=1)

# KMO (Kaiser-Meyer-Olkin) test
def kmo(dataset_corr: np.ndarray) -> float:
    """Return the overall Kaiser-Meyer-Olkin measure of sampling adequacy.

    Compares the sum of squared correlations with the sum of squared
    partial (anti-image) correlations; values near 1 indicate the data
    are suitable for factor analysis.

    Args:
        dataset_corr: square (p x p) correlation matrix.

    Returns:
        The KMO statistic in (0, 1].
    """
    corr = np.asarray(dataset_corr, dtype=float)
    corr_inv = np.linalg.inv(corr)
    # Anti-image correlation matrix, vectorized form of
    # A[i, j] = -corr_inv[i, j] / sqrt(corr_inv[i, i] * corr_inv[j, j])
    d = np.sqrt(np.diagonal(corr_inv))
    A = -corr_inv / np.outer(d, d)
    # Off-diagonal sums of squares.  diag(corr) is 1 and diag(A) is -1,
    # so each diagonal contributes exactly p to the raw sum of squares.
    p = corr.shape[0]
    sq_corr = np.sum(np.square(corr)) - p
    sq_partial = np.sum(np.square(A)) - p
    return sq_corr / (sq_corr + sq_partial)

if __name__ == "__main__":
    excel_path = "eva_function_test/original_data.xlsx"
    df = pd.read_excel(io=excel_path)
    original_data = df.to_numpy()

    # step 1 Normalization (column-wise z-scores)
    data = standardization(original_data)
    # An all-zero row is appended *before* standardization so that, after the
    # factor transform, its scores give the factor-space image of a zero
    # sample — required by the APCS technique in step 4.
    data_with_zero = standardization(
        np.concatenate((original_data, np.zeros((1, original_data.shape[1]))), axis=0)
    )

    # step 2 KMO & Bartlett's test of sphericity
    dataset_corr = np.corrcoef(data.T)  # correlation matrix
    kmo_value = kmo(dataset_corr)
    # Bartlett's test of sphericity (H0: correlation matrix is the identity):
    #   chi2 = -(n - 1 - (2p + 5) / 6) * ln(det(R)),  df = p(p - 1) / 2
    # NOTE: scipy.stats.bartlett tests equality of variances across samples
    # and is NOT the sphericity test required here.
    n_samples, n_vars = data.shape
    chi_square = -(n_samples - 1 - (2 * n_vars + 5) / 6.0) * np.log(np.linalg.det(dataset_corr))
    dof = n_vars * (n_vars - 1) / 2
    p_value = chi2.sf(chi_square, dof)
    # Proceed only if sampling adequacy is acceptable (KMO >= 0.5) and the
    # sphericity test is significant (p <= 0.05); otherwise factor analysis
    # is not meaningful for this data set.
    if kmo_value < 0.5 or p_value > 0.05:
        exit()

    # step 3 Principal Components Analysis and Factor Analysis
    modelPCA = PCA(n_components=None).fit(data)
    # Kaiser criterion: retain factors whose eigenvalue exceeds 1.
    factor_num = (modelPCA.explained_variance_ > 1).sum()

    modelFA = FactorAnalyzer(n_factors=factor_num, rotation='varimax', method="principal")
    modelFA_with_zero = FactorAnalyzer(n_factors=factor_num, rotation='varimax', method="principal")
    faTransData = modelFA.fit_transform(data)
    faTransData_with_zero = modelFA_with_zero.fit_transform(data_with_zero)

    # step 4 Calculate the APCS (absolute principal component scores):
    # subtract the factor scores of the artificial all-zero sample.
    zeroData = faTransData_with_zero[-1]
    apcsData = faTransData - zeroData

    # step 5 & 6 Multiple Linear Regression & contribution-rate calculation
    resultMLR = []
    apcs_mean_data = np.mean(apcsData, axis=0)
    # Mean |APCS| per factor with a trailing 1 appended for the intercept term.
    apcs_cal_data = np.abs(np.append(apcs_mean_data, 1))
    for y_data, label in zip(np.split(original_data, original_data.shape[1], axis=1), df.keys()):
        modelMLR = linear_model.LinearRegression()
        modelMLR.fit(X=apcsData, y=y_data)
        # |coefficients| followed by |intercept|, aligned with apcs_cal_data.
        parameters = np.abs(np.concatenate((modelMLR.coef_.reshape((factor_num,)), modelMLR.intercept_)))
        predict_abs_data = np.dot(parameters, apcs_cal_data)
        # Share of each factor (and the intercept) in the absolute prediction.
        contribution_rate = [
            float(_parameter * _apcs / predict_abs_data)
            for _parameter, _apcs in zip(parameters, apcs_cal_data)
        ]
        r2 = float(modelMLR.score(apcsData, y_data))
        n_obs = len(y_data)
        resultMLR.append({
            'label': label,
            'coef': modelMLR.coef_.tolist(),
            'intercept': modelMLR.intercept_.tolist(),
            'r2_score': r2,
            'adjusted_r2': 1 - (1 - r2) * (n_obs - 1) / (n_obs - apcsData.shape[1] - 1),
            'contribution_rate': contribution_rate,
        })
    # utf-8 + ensure_ascii=False keeps non-ASCII column labels readable.
    with open('./1.json', 'w', encoding='utf-8') as file:
        json.dump(resultMLR, file, ensure_ascii=False)