import os
import pyarrow.parquet as pq
import pandas as pd
from typing import Tuple, List, Dict, Any


def check_snappy_compressed_parquet(file_path: str) -> Tuple[bool, List[str], Dict[str, Any]]:
    """
    Detect anomalies in a Snappy-compressed Parquet file.

    Args:
        file_path: Path to the Parquet file.

    Returns:
        Tuple of (has_anomalies, list of anomaly descriptions,
        file metadata info dict).
    """
    anomalies: List[str] = []
    file_info: Dict[str, Any] = {
        'path': file_path,
        'compression': 'snappy',
        # Guarded by exists() so a missing file reports 0 instead of raising.
        'size_mb': os.path.getsize(file_path) / (1024 * 1024) if os.path.exists(file_path) else 0,
        'valid': False
    }

    # 1. Basic checks — short-circuit before any parsing work.
    if not os.path.exists(file_path):
        anomalies.append("文件不存在")
        return (True, anomalies, file_info)

    if file_info['size_mb'] == 0:
        anomalies.append("文件大小为0MB")
        return (True, anomalies, file_info)

    try:
        # 2. Read the Parquet footer metadata.
        parquet_file = pq.ParquetFile(file_path)
        metadata = parquet_file.metadata

        # Record footer-level facts about the file.
        file_info.update({
            'num_rows': metadata.num_rows,
            'num_columns': metadata.num_columns,
            'created_by': metadata.created_by,
            'version': metadata.version,
            'row_groups': metadata.num_row_groups,
            'valid': True
        })

        # 3. Verify every column chunk in every row group uses SNAPPY.
        for i in range(metadata.num_row_groups):
            row_group = metadata.row_group(i)
            for j in range(row_group.num_columns):
                col = row_group.column(j)
                if col.compression != 'SNAPPY':
                    anomalies.append(f"行组{i}列{j}使用非常规压缩: {col.compression}")

        # 4. Schema check: flag columns whose logical type is null.
        schema = parquet_file.schema
        for field in schema:
            if str(field.type) == 'null':
                anomalies.append(f"列 '{field.name}' 数据类型为null")

        # 5. Sample-based data checks on the first row group only.
        try:
            if metadata.num_row_groups == 0:
                # Fix: read_row_group(0) would raise IndexError on a
                # zero-row-group file; report it explicitly instead.
                anomalies.append("数据抽样检查失败: 文件不包含行组")
            else:
                sample = parquet_file.read_row_group(0)
                df = sample.to_pandas()

                # Missing-value counts per column.
                missing_values = df.isnull().sum()
                for col, count in missing_values.items():
                    if count > 0:
                        anomalies.append(f"列 '{col}' 有 {count} 个缺失值")

                # Numeric-column sanity checks (NaN, all-zero, huge range).
                numeric_cols = df.select_dtypes(include=['int64', 'float64']).columns
                for col in numeric_cols:
                    col_min = df[col].min()
                    col_max = df[col].max()

                    if pd.isna(col_min) or pd.isna(col_max):
                        anomalies.append(f"数值列 '{col}' 包含NaN值")
                    elif col_min == col_max == 0:
                        anomalies.append(f"数值列 '{col}' 所有值为0")
                    elif (col_max - col_min) > 1e6:
                        anomalies.append(f"数值列 '{col}' 值范围异常大({col_min}到{col_max})")

        except Exception as e:
            # Sampling problems are reported as anomalies, not fatal errors.
            anomalies.append(f"数据抽样检查失败: {str(e)}")

        return (len(anomalies) > 0, anomalies, file_info)

    except Exception as e:
        # Footer/metadata read failed: file is unreadable or corrupt.
        anomalies.append(f"文件读取异常: {str(e)}")
        return (True, anomalies, file_info)


def validate_snappy_compression(file_path: str) -> Dict[str, Any]:
    """
    Validate that a Parquet file is uniformly Snappy-compressed.

    Returns:
        {
            'valid': bool,
            'compression_consistent': bool,
            'row_groups': int,
            'compression_types': list,
            'errors': list
        }
    """
    report: Dict[str, Any] = {
        'valid': False,
        'compression_consistent': True,
        'row_groups': 0,
        'compression_types': [],
        'errors': []
    }

    try:
        meta = pq.ParquetFile(file_path).metadata
        report['row_groups'] = meta.num_row_groups

        # Gather the codec of every column chunk across all row groups.
        seen = {
            meta.row_group(rg).column(c).compression
            for rg in range(meta.num_row_groups)
            for c in range(meta.row_group(rg).num_columns)
        }
        report['compression_types'] = list(seen)

        # The file is valid only if SNAPPY is the single codec observed.
        if seen == {'SNAPPY'}:
            report['valid'] = True
        else:
            report['compression_consistent'] = False
            report['errors'].append(f"压缩格式不一致: {seen}")

        return report

    except Exception as exc:
        # Any read/metadata failure is recorded, not raised.
        report['errors'].append(str(exc))
        return report