import logging
import os
import re
from calendar import monthrange
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor

import numpy as np
import pandas as pd

from config import *


def extract(data_file_path):
    """Extract one month of measurements from an Excel workbook.

    The file name must contain two runs of digits; the first two are taken
    as <year> and <month> and determine how many day sheets the workbook
    holds (sheet names run -2, -1, 1, 2, ..., days_in_month - 2; there is
    no sheet "0").

    Args:
        data_file_path (str): Path to the monthly .xlsx workbook.

    Returns:
        tuple[np.ndarray, np.ndarray]:
            feature_data_tensor: <num_sheets, feature_nums> — per-sheet mean
                of each feature, ignoring NaN cells (0.0 for all-NaN rows).
            extract_data_tensor: <feature_nums, total_columns> — raw values
                from all sheets concatenated column-wise.
    """
    logging.info(f"Extracting Data from file <{data_file_path}>")
    # First two digit groups in the file name are <year>, <month>.
    # NOTE: raw string avoids the invalid-escape SyntaxWarning on "\d".
    d = re.findall(r"\d+", os.path.basename(data_file_path))
    # Sheet name:  -2, -1, 1, 2, ... , 27 [, 28, 29]
    sheet_name_list = list(range(-2, monthrange(int(d[0]), int(d[1]))[1] - 1))
    sheet_name_list.remove(0)
    # Feature data for model training, validating and evaluation
    feature_data_tensor = np.empty((0, len(feature_name_list)))
    # Extract data for data analysis and correlation analysis
    extract_data_tensor = np.empty((len(feature_name_list), 0))
    for sheet_name in sheet_name_list:
        sheet = pd.read_excel(
            io=data_file_path,
            sheet_name=str(sheet_name),
            nrows=len(feature_name_list),
            header=1,
        )
        # Drop label / shift-average / day-average columns: they carry no
        # per-hour measurements.
        sheet.drop(columns=["项目", "Unnamed: 1", "班平均", "班平均.1", "日平均"], inplace=True)
        extract_data_tensor = np.concatenate(
            (extract_data_tensor, np.array(sheet)), axis=1
        )
        # Per-sheet (daily) mean of each feature, skipping NaN cells.
        feature_data_vector = np.zeros(len(feature_name_list))
        for i in range(len(feature_name_list)):
            extract_data_line = np.array(sheet.loc[i])
            extract_data_index = ~np.isnan(extract_data_line)
            if np.sum(extract_data_index) > 0:  # guard against all-NaN rows
                feature_data_vector[i] = np.mean(extract_data_line[extract_data_index])
        feature_data_tensor = np.concatenate(
            (feature_data_tensor, feature_data_vector.reshape(1, -1)), axis=0
        )

    return feature_data_tensor, extract_data_tensor


def data_washing(extract_data, feature_data, vis=True):
    """Remove outliers from the hourly data and the daily feature data.

    For each feature, statistics (mean, std, median) are computed from the
    hourly `extract_data` and reused to wash the daily `feature_data`:
    values outside mean +/- 2.698*sigma are masked as outliers, then any
    value more than 8x above or below the median is masked as abnormal.

    Args:
        extract_data (np.ndarray): <feature_nums, total_hours> hourly values.
        feature_data (np.ndarray): <days, feature_nums> daily means.
        vis (bool): Print a per-feature washing summary when True.

    Returns:
        tuple[list, np.ndarray]:
            newextract_data (list): [<not_nan_data,>] arrays, one per
                feature; lengths differ because NaNs were removed upstream.
            newfeature_data (np.ndarray): <clean_days, kept_feature_nums>.
                Features with fewer than 2/3 valid days are dropped, and any
                remaining day (row) still containing a NaN is deleted.
    """
    newextract_data = []
    newfeature_data = np.empty((len(feature_data), 0))

    print("Feature data after washing...")
    for i in range(len(feature_name_list)):
        # === Washing on `extract_data`
        index = ~np.isnan(extract_data[i])
        i_extract_data = extract_data[i, index]
        total_count = np.sum(index)

        mean_val = np.mean(i_extract_data)
        std_val = np.std(i_extract_data)
        median_val = np.median(i_extract_data)

        # +/- 2.698 sigma covers ~99.3% of a normal distribution.
        outlier_val = [mean_val - 2.698 * std_val, mean_val + 2.698 * std_val]

        # index of Normal distribution within 2.698 sigma
        sigma_mask = (outlier_val[0] < i_extract_data) & (
            outlier_val[1] > i_extract_data
        )
        # No sigma-washing if there are too many outlier values (the
        # normality assumption likely does not hold for this feature).
        washed_ifeature = i_extract_data
        if np.sum(sigma_mask) >= total_count * 0.955:
            # np.nan (not the removed np.NaN alias) keeps NumPy 2.x compat.
            washed_ifeature = np.where(sigma_mask, i_extract_data, np.nan)

        # washing if value is abnormal (8 multiple of the median)
        abnormal_mask = (washed_ifeature > median_val / 8) & (
            washed_ifeature < median_val * 8
        )
        cleaned_ifeature = np.where(abnormal_mask, washed_ifeature, np.nan)

        newextract_data.append(cleaned_ifeature)

        # === Washing on `feature_data`, reusing the hourly statistics above
        i_feature_data = feature_data[:, i]

        sigma_mask = (outlier_val[0] < i_feature_data) & (
            outlier_val[1] > i_feature_data
        )

        washed_ifeature = np.where(sigma_mask, i_feature_data, np.nan)
        # washing if value is abnormal (8 multiple)
        abnormal_mask = (washed_ifeature > median_val / 8) & (
            washed_ifeature < median_val * 8
        )
        cleaned_ifeature = np.where(abnormal_mask, washed_ifeature, np.nan)

        # Drop the feature if few useful information (< 2/3 valid days).
        if np.sum(abnormal_mask) < 2 / 3 * len(abnormal_mask):
            if vis:
                print(
                    "{0:8} Dropped!\tValid data num: {1}".format(
                        feature_name_list[i],
                        np.sum(abnormal_mask),
                    )
                )
            continue
        if vis:
            print(
                "{3:8}\tOri: {0}\t->\tValid(SigmaClean): {1}\t->\tValid(AbClean): {2}".format(
                    i_feature_data.shape[0],
                    np.sum(sigma_mask),
                    np.sum(abnormal_mask),
                    feature_name_list[i],
                )
            )
        newfeature_data = np.concatenate(
            (newfeature_data, cleaned_ifeature.reshape(-1, 1)), axis=1
        )
    # Delete any day (row) that still holds a NaN in a kept feature.
    feature_delete_index = [
        i
        for i in range(newfeature_data.shape[0])
        if np.sum(np.isnan(newfeature_data[i])) > 0
    ]
    newfeature_data = np.delete(newfeature_data, feature_delete_index, axis=0)
    return newextract_data, newfeature_data


if __name__ == "__main__":
    feature_data = np.empty((0, len(feature_name_list)))
    extract_data = np.empty((len(feature_name_list), 0))
    # Extract: parse all monthly workbooks in parallel (CPU-bound Excel I/O).
    with ProcessPoolExecutor(max_workers=6) as executor:
        data_list = list(executor.map(extract, data_file_path_list))
    # Gather: map preserves input order, so results can be iterated directly.
    for per_file_feature, per_file_extract in data_list:
        feature_data = np.concatenate((feature_data, per_file_feature), axis=0)
        extract_data = np.concatenate((extract_data, per_file_extract), axis=1)
    # Save raw tensors before washing.
    np.save(feature_data_file_path, feature_data)
    np.save(extract_data_file_path, extract_data)

    washed_extract_data, washed_feature_data = data_washing(extract_data, feature_data)
    np.save(washed_feature_data_file_path, washed_feature_data)
    # Per-feature arrays have differing lengths, hence dtype=object.
    np.save(washed_extract_data_file_path, np.array(washed_extract_data, dtype=object))

    logger.info("Done")
