from typing import Optional

import numpy as np
import pandas as pd
from joblib import delayed, Parallel

from src.columns.cols import all_cols
from constant import pre_handle_remove_missing_data
from src.train import FlowPipline, TrainParam, FlowInterrupt, FlowEnum
from src.util.common_util import extract_all, fill_str_with_blank, process_str, process_str_array, load_vectory, \
    process_numeric_col, save_vectory, get_required_columns


@FlowPipline.bind(FlowEnum.step2_check_data, is_train=True, is_predict=True)
def check_column(df: Optional[pd.DataFrame], params: TrainParam) -> pd.DataFrame:
    """Validate that every required column exists in *df*.

    Raises:
        FlowInterrupt: if any required column is missing (message lists them).

    Returns:
        The input frame restricted (and reordered) to the required columns.
    """
    required_columns = get_required_columns()
    # Set lookup instead of scanning df.columns.tolist() once per column.
    present = set(df.columns)
    need_cols = [col for col in required_columns if col not in present]
    if need_cols:
        raise FlowInterrupt(f"字段不完整，还需字段：{need_cols}")
    params.console.print("字段检查完毕")
    return df[required_columns]


@FlowPipline.bind(FlowEnum.step3_delete_data, is_train=True, is_predict=True)
def drop_row(df: Optional[pd.DataFrame], params: TrainParam) -> pd.DataFrame:
    """Drop rows whose key request fields are missing or placeholder values.

    Active only when ``pre_handle_remove_missing_data`` is set. A row is kept
    when none of the present key columns is 0 / "" / "/" and none is NaN.

    BUG FIX: the original computed the filtered frame and printed how many
    rows were removed, but then returned the UNFILTERED ``df`` — so no rows
    were ever actually dropped. Now the filtered frame is returned.
    """
    if not pre_handle_remove_missing_data:
        return df
    before = len(df)
    present = set(df.columns)
    cols = [col for col in ("server.bytes", "req_uri", "res_status_code", "req_method")
            if col in present]
    # Valid rows: no placeholder value in any key column and no NaN.
    mask = ~df[cols].isin([0, "", "/"]).any(axis=1) & df[cols].notna().all(axis=1)
    df = df[mask]
    params.console.print(f"删除了：{before - len(df)}条数据")
    return df


@FlowPipline.bind(FlowEnum.step4_delete_file_data, is_train=True, is_predict=True)
def remove_binary_or_file(df: Optional[pd.DataFrame | None], params: TrainParam) -> pd.DataFrame:
    """Randomly down-sample rows that look like static-file / binary traffic.

    Rows are flagged when (a) a file-extension keyword appears anywhere in
    ``req_uri``, or (b) a binary keyword appears in
    ``http.response-content-type``. A fraction ``params.delete_file_suffix``
    of each flagged group is then dropped at random. A deep copy of the
    resulting frame is stored in ``params.origin_df`` for later steps.

    NOTE(review): uses ``Series.progress_apply`` — presumably tqdm's pandas
    hook, which requires ``tqdm.pandas()`` to have been called earlier in the
    program; confirm against the pipeline entry point.
    """
    from urllib.parse import urlparse
    # Substring keywords that mark a URI as a static-file request.
    # NOTE(review): these are matched as substrings anywhere in the URI, not
    # as true path suffixes — e.g. the bare "ico" entry matches any URI that
    # merely contains "ico"; confirm this breadth is intended.
    file_suffix_keyword = [
        ".jpg", ".jpeg", ".png", ".gif", ".webp", ".svg", ".ico", ".bmp", ".tiff", ".avif", ".heic",
        ".mp3", ".wav", ".ogg", ".flac", ".aac", ".m4a", ".weba",
        ".mp4", ".webm", ".avi", ".mov", ".mkv", ".flv", ".wmv", ".m4v", ".ts", ".3gp",
        ".pdf", ".doc", ".docx", ".xls", ".xlsx", ".ppt",
        ".pptx", ".odt", ".ods", ".odp", ".txt", ".rtf", ".md", ".csv",
        ".html", ".htm", ".css", ".js", ".xhtml", ".rss",
        ".zip", ".rar", ".7z", ".tar", ".gz", ".bz2", ".xz", ".zst",
        ".woff", ".woff2", ".ttf", ".otf", ".eot", ".apk",
        ".swf", ".fla", ".blend", ".dae", ".glb", ".gltf",
        ".webmanifest", ".appcache", "icon", "ico", "appico", "m3u8"
    ]
    # Keywords that mark a response content type as binary / non-text.
    content_type_has_binary = ['vnd.', 'excel', 'xlsx', 'javascript', 'font', 'ttf', 'otf', 'video', 'audio', 'image',
                               'pdf']

    def is_file_suffix(url) -> bool:
        """Return True if *url* looks like a static-file request."""
        try:
            # An empty or root ("/") path is never treated as a file request.
            parsed = urlparse(f'{url}')
            if parsed.path in ('', '/'):
                return False
        except ValueError:
            pass  # unparseable URL: fall through to the keyword scan below
        for col in file_suffix_keyword:
            if col in f'{url}':
                return True
        return False

    def content_type_is_binary(x) -> bool:
        """Return True if content type *x* contains any binary keyword."""
        return any(col in x for col in content_type_has_binary)

    def delete_row_if_true(df, col, delete_percentage: float):
        """Randomly drop *delete_percentage* of the rows where *col* is True,
        printing before/after statistics.
        """
        params.console.print(f"要删除的字段{col}")
        params.console.print(f"当前总数:{len(df)}")
        true_rows = df[df[col]]
        total_rows = len(df)
        true_count = len(true_rows)
        # Rounded so e.g. 0.5 of 3 flagged rows deletes 2, not 1.
        delete_count = int(round(delete_percentage * true_count))
        if delete_count == 0:
            params.console.print("无删除（delete_count == 0）")
            return df
        # NOTE(review): np.random is not seeded here, so the sampled rows
        # differ between runs — confirm whether reproducibility matters.
        delete_indices = np.random.choice(true_rows.index, delete_count, replace=False)
        df_new = df.drop(delete_indices)
        params.console.print(f"True数量: {len(true_rows)}")
        params.console.print(f"删除数量: {delete_count}")
        params.console.print(f"删除占True比例: {delete_count / true_count:.2%}")
        params.console.print(f"剩余总数: {len(df_new)}")
        params.console.print(f"删除占总体比例: {delete_count / total_rows:.2%}")
        return df_new

    df['is_file_suffix'] = df['req_uri'].progress_apply(is_file_suffix)
    df['content_type_is_binary'] = df['http.response-content-type'].progress_apply(content_type_is_binary)
    df = delete_row_if_true(df, 'is_file_suffix', params.delete_file_suffix)
    df = delete_row_if_true(df, 'content_type_is_binary', params.delete_file_suffix)
    # Drop the temporary flag columns before handing the frame onward.
    df = df.drop(columns=['is_file_suffix', 'content_type_is_binary'])
    # Keep a deep snapshot so later in-place steps cannot mutate it.
    params.origin_df = df.copy(deep=True)
    return df


@FlowPipline.bind(FlowEnum.step5_split_uri_to_cols, is_train=True, is_predict=True)
def split_uri_to_cols(df: pd.DataFrame, params: TrainParam) -> pd.DataFrame:
    """Split ``http.uri`` into domain/path/params columns (the original
    ``http.uri`` column is kept).

    BUG FIX: earlier steps drop rows, leaving gaps in ``df``'s index, while
    ``new_cols`` is built with a fresh RangeIndex. ``pd.concat(axis=1)``
    aligns on index labels, so the old code could pair URI parts with the
    wrong rows and introduce NaN rows. Resetting ``df``'s index first makes
    the two frames line up positionally.
    """
    params.console.print(f"添加新的三个列，不删除http.uri")
    results = Parallel(n_jobs=-1)(delayed(extract_all)(uri) for uri in df['http.uri'])
    new_cols = pd.DataFrame(results, columns=['http.uri_domain', 'http.uri_path', 'http.uri_params'])
    df = df.reset_index(drop=True)
    return pd.concat([df, new_cols], axis=1)


@FlowPipline.bind(FlowEnum.step6_pre_handle_str, is_train=True, is_predict=True)
def pre_handle_str(df, params: TrainParam):
    """Hash the text columns in parallel, then drop the raw text values."""
    string_names = {item['col'] for item in all_cols if item['type'] == 'string'}
    columns = [name for name in df.columns.to_list() if name in string_names]
    params.console.print(f"文件内的文本字段有:{len(columns)}")
    df = fill_str_with_blank(df, columns)
    total = len(columns)
    # One parallel job per text column; each returns a hashed replacement frame.
    hashed_parts = Parallel(n_jobs=-1)(
        delayed(process_str)(df[name], name, idx, total)
        for idx, name in enumerate(columns)
    )
    merged = pd.concat([df, *hashed_parts], axis=1).reset_index(drop=True)
    merged.drop(columns=columns, inplace=True)
    return merged


@FlowPipline.bind(FlowEnum.step7_fill_num, is_train=True, is_predict=True)
def fill_num(df, params: TrainParam):
    """Normalize numeric columns: '' → NaN → -1 sentinel, cast to int.

    Renamed from a second ``pre_handle_str`` definition, which silently
    shadowed step6's function of the same name at module level; the name now
    matches its flow step (``step7_fill_num``). Registration via the
    ``FlowPipline.bind`` decorator is unchanged.
    """
    number_col = [col['col'] for col in all_cols if col['type'] == 'number']
    cols = [col for col in df.columns.to_list() if col in number_col]
    params.console.print(f"文件内的数值字段有:{len(cols)}")
    # Empty strings become NaN so fillna can catch both cases, then the -1
    # sentinel marks "missing" before the integer cast.
    df.loc[:, cols] = (
        df[cols]
        .replace('', np.nan)
        .fillna(-1)
        .astype(int)
    )
    return df


@FlowPipline.bind(FlowEnum.step8_pre_handle_array, is_train=True, is_predict=True)
def pre_handle_array(df, params: TrainParam):
    """Vectorize array-typed columns and drop the originals.

    Text arrays are processed in parallel via ``process_str_array``. Numeric
    arrays go through ``process_numeric_col`` with a column→vector map that
    is built fresh during training (and persisted with ``save_vectory``) or
    loaded with ``load_vectory`` at predict time.
    """
    # Hoist the all_cols scans out of the membership comprehensions
    # (the original rebuilt the inner list once per df column).
    array_str_names = {item['col'] for item in all_cols if item['type'] == 'array_str'}
    array_str_cols = [col for col in df.columns.to_list() if col in array_str_names]
    params.console.print(f"文件内的文本数组:{len(array_str_cols)}")
    dfs = Parallel(n_jobs=-1)(
        delayed(process_str_array)(df[col], col, i, len(array_str_cols))
        for i, col in enumerate(array_str_cols)
    )
    df = pd.concat([df, *dfs], axis=1).reset_index(drop=True)
    df.drop(columns=array_str_cols, inplace=True)

    array_num_names = {item['col'] for item in all_cols if item['type'] == 'array_num'}
    array_num_cols = [col for col in df.columns.to_list() if col in array_num_names]
    params.console.print(f"文件内的数值数组:{len(array_num_cols)}")
    # Training builds the vector map from scratch; prediction reuses the saved one.
    column_map = {} if params.is_train else load_vectory()
    results = Parallel(n_jobs=-1)(
        delayed(process_numeric_col)(col, df[col], params.is_train, column_map)
        for col in array_num_cols
    )
    df_list = [df]
    for res, col_map in results:  # renamed: 'map' shadowed the builtin
        column_map.update(col_map)
        df_list.append(res)
    if params.is_train:
        save_vectory(column_map)
    df = pd.concat(df_list, axis=1).reset_index(drop=True)
    df.drop(columns=array_num_cols, inplace=True)
    return df


@FlowPipline.bind(FlowEnum.step9_fill_bool_with_num, is_train=True, is_predict=True)
def fill_bool_with_num(df, params: TrainParam):
    """Map boolean-typed columns to 0/1 integers in place."""
    bool_names = {item['col'] for item in all_cols if item['type'] == 'bool'}
    bool_cols = [name for name in df.columns.to_list() if name in bool_names]
    params.console.print(f"文件内的布尔字段:{len(bool_cols)}")
    for i, col in enumerate(bool_cols):
        params.console.print(f"[预处理]逻辑值:{i + 1}/{len(bool_cols)}  {col}")
        # Stringify first so True/"True"/1/"1" all normalize, then 0/1-encode.
        as_text = df[col].astype(str)
        df[col] = as_text.isin(["True", "1"]).astype(int)
    return df
