#
# Copyright (c) 2025 CAX Conversion Project
#

from io import BytesIO
import os
import pandas as pd
import json

# Common date and datetime formats accepted by the time-format check.
# Hoisted to module level so the list is not rebuilt for every sheet.
_VALID_DATE_FORMATS = [
    '%Y-%m-%d',         # 2023-10-08
    '%d/%m/%Y',         # 08/10/2023
    '%m-%d-%Y',         # 10-08-2023
    '%d-%m-%Y',         # 08-10-2023
    '%Y/%m/%d',         # 2023/10/08
    '%m/%d/%Y',         # 10/08/2023
    '%d %b %Y',         # 08 Oct 2023
    '%d %B %Y',         # 08 October 2023
    '%Y.%m.%d',         # 2023.10.08
    '%Y%m%d',           # 20231008
    '%Y-%m-%d %H:%M:%S',# 2023-10-08 14:30:59
    '%Y/%m/%d %H:%M:%S',# 2023/10/08 14:30:59
    '%d-%m-%Y %H:%M:%S',# 08-10-2023 14:30:59
    '%m-%d-%Y %H:%M:%S',# 10-08-2023 14:30:59
    '%d/%m/%Y %H:%M:%S',# 08/10/2023 14:30:59
    '%d %b %Y %H:%M:%S',# 08 Oct 2023 14:30:59
    '%d %B %Y %H:%M:%S',# 08 October 2023 14:30:59
    '%Y-%m-%d %H:%M',   # 2023-10-08 14:30
    '%Y/%m/%d %H:%M',   # 2023/10/08 14:30
]


def generate_excel_quality_report(file_path, time_check_string=None, outlier_check_string=None):
    """Generate an Excel data-quality report.

    For every sheet in the workbook, the report records column count, null
    rows, total rows, columns with mixed Python types, optional date-format
    validation, and optional Tukey outlier detection.

    Args:
        file_path: Path to the .xlsx workbook to inspect.
        time_check_string: Optional JSON string mapping sheet name -> list of
            column names whose values should parse as dates/times.
        outlier_check_string: Optional JSON string mapping sheet name -> list
            of numeric column names to scan for Tukey outliers.

    Returns:
        str: The report as a pretty-printed JSON string.

    Raises:
        json.JSONDecodeError: If either check string is not valid JSON.
        FileNotFoundError / ValueError: Propagated from pandas if the
            workbook cannot be opened or a configured column is missing.
    """
    time_check = json.loads(time_check_string) if time_check_string else None
    outlier_check = json.loads(outlier_check_string) if outlier_check_string else None

    sheet_reports = []
    # Use the ExcelFile as a context manager so the workbook handle is
    # closed when done (the original leaked it).
    with pd.ExcelFile(file_path, engine='openpyxl') as xls:
        for sheet_name in xls.sheet_names:
            # Parse from the already-open workbook instead of re-reading the
            # file from disk once per sheet.
            df = xls.parse(sheet_name)
            sheet_reports.append(
                _build_sheet_report(df, sheet_name, time_check, outlier_check)
            )

    report_dict = {
        "file_name": os.path.basename(file_path),
        "data_quality_report": sheet_reports,
    }
    return json.dumps(report_dict, indent=4, ensure_ascii=False)


def _build_sheet_report(df, sheet_name, time_check, outlier_check):
    """Compute the quality metrics for one sheet and return its report dict."""
    columns = df.columns.tolist()

    # Number of rows containing at least one null value (cast from numpy int
    # so the dict is JSON-serializable).
    rows_with_nulls = int(df.isnull().any(axis=1).sum())
    total_rows = len(df)

    # Columns whose non-null values mix more than one Python type.
    inconsistent_columns = [
        column for column in columns
        if len(df[column].dropna().apply(type).unique()) > 1
    ]

    return {
        "sheet_name": sheet_name,
        "number_of_columns": len(columns),
        "rows_with_nulls": rows_with_nulls,
        "total_number_of_rows": total_rows,
        "inconsistent_columns_count": len(inconsistent_columns),
        "inconsistent_columns": inconsistent_columns,
        # NOTE(review): key intentionally kept as "oulier_check" (sic) —
        # downstream consumers may depend on this exact spelling.
        "oulier_check": _find_outlier_columns(df, sheet_name, outlier_check),
        "time_check": _check_time_columns(df, sheet_name, time_check),
    }


def _check_time_columns(df, sheet_name, time_check):
    """Validate configured date/time columns for one sheet.

    Returns a (Chinese) status string: "no time fields", "all formats OK",
    or "bad format" with the offending column names.
    """
    result = "无时间字段"
    if time_check is None:
        return result

    for time_sheet_name, column_names in time_check.items():
        if sheet_name != time_sheet_name:
            continue
        bad_columns = []
        result = "所有时间格式正确"
        for column_name in column_names:
            # NOTE(review): .iloc[1:] skips the first DATA row even though
            # pandas already consumed the header — preserved from the
            # original; confirm this is intentional.
            for value in df[column_name].iloc[1:]:
                if not _parses_as_datetime(value):
                    # One bad value is enough to flag the column.
                    bad_columns.append(column_name)
                    break
        if bad_columns:
            result = f"时间格式有误，字段：{bad_columns}"
    return result


def _parses_as_datetime(value):
    """Return True if *value* matches any of the accepted date/time formats."""
    for date_format in _VALID_DATE_FORMATS:
        try:
            pd.to_datetime(value, format=date_format, errors='raise')
            return True
        except (ValueError, TypeError):
            continue
    return False


def _find_outlier_columns(df, sheet_name, outlier_check):
    """Return the configured columns of this sheet that contain Tukey outliers."""
    outlier_columns = []
    if outlier_check is None:
        return outlier_columns

    for outlier_sheet_name, column_names in outlier_check.items():
        if sheet_name != outlier_sheet_name:
            continue
        for column_name in column_names:
            _, _, outliers = detect_outliers_tukey(df[column_name])
            if len(outliers) > 0:
                outlier_columns.append(column_name)
    return outlier_columns

# Tukey 方法检测异常值
def detect_outliers_tukey(data):
    """Detect outliers in *data* using Tukey's IQR fences.

    Values are coerced to numeric first; anything non-numeric (and any
    missing value) is dropped before the quartiles are computed.

    Args:
        data: A pandas Series (any dtype; non-numeric entries are ignored).

    Returns:
        tuple: ``(lower_bound, upper_bound, outliers)`` where the bounds are
        the Tukey fences ``Q1 - 1.5*IQR`` and ``Q3 + 1.5*IQR`` and
        ``outliers`` is the Series of values outside them. If no numeric
        values remain, returns ``(None, None, [])``.
    """
    # Keep only the numeric, non-missing portion of the input.
    numeric = pd.to_numeric(data, errors='coerce').dropna()

    # Nothing numeric to analyze: report no outliers.
    if numeric.empty:
        return None, None, []

    # Quartiles and interquartile range.
    first_quartile = numeric.quantile(0.25)
    third_quartile = numeric.quantile(0.75)
    iqr = third_quartile - first_quartile

    # Tukey fences at 1.5 * IQR beyond each quartile.
    low_fence = first_quartile - 1.5 * iqr
    high_fence = third_quartile + 1.5 * iqr

    # Everything outside the fences counts as an outlier.
    flagged = numeric[(numeric < low_fence) | (numeric > high_fence)]
    return low_fence, high_fence, flagged
