#!/usr/bin/env python3
"""
数据分布模型评估工具

该工具用于评估一组数据的最佳分布模型，支持多种常见的统计分布，
包括正态分布、负二项分布、泊松分布、指数分布等。

作者: Toolbelt
版本: 1.0.0
"""

import argparse
import sys
import json
from typing import List, Dict, Tuple, Any, Optional
import warnings
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.optimize import curve_fit

def main():
    """Command-line entry point: parse options, load data, rank candidate
    distributions, and emit results (stdout, optional JSON, optional plot)."""
    # Suppress library warnings so CLI output stays clean.
    warnings.filterwarnings('ignore')

    opts = _build_arg_parser().parse_args()

    # Load the data, applying the optional threshold filter.
    values, total_before = load_data(opts.input_file, opts.column, opts.threshold)
    if opts.verbose:
        print(f"原始数据点数量: {total_before}")
        if opts.threshold is not None:
            kept = len(values)
            print(f"应用阈值 {opts.threshold} 后剩余数据点: {kept} ({kept/total_before*100:.1f}%)")
        print(f"分析数据点数量: {len(values)}")
        print(f"数据范围: {values.min():.4f} - {values.max():.4f}")
        print(f"数据均值: {values.mean():.4f}, 标准差: {values.std():.4f}")

    # The candidate list arrives as one comma-separated string.
    candidates = [name.strip() for name in opts.distributions.split(',')]

    # Fit and rank the candidate distributions.
    ranking = evaluate_distributions(values, candidates, opts.method, opts.verbose)

    print_results(ranking, opts.top)

    # Optional JSON export.
    if opts.output:
        save_results(ranking, opts.output, values, total_before, opts)
        print(f"\n结果已保存到: {opts.output}")

    # Optional diagnostic figure.
    if opts.plot:
        image_path = opts.plot_output or 'distribution_fit.png'
        create_plots(values, ranking, image_path, opts.top)
        print(f"图表已保存到: {image_path}")


def _build_arg_parser() -> argparse.ArgumentParser:
    """Construct the CLI argument parser (split out of main for readability)."""
    parser = argparse.ArgumentParser(
        description="数据分布模型评估工具 - 评估数据的最佳分布模型",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
使用示例:
  distribution-evaluator data.txt --output results.json
  distribution-evaluator data.csv --column value --plot
  distribution-evaluator data.csv --column value --threshold 10.0 --plot
  distribution-evaluator data.txt --distributions normal,lognormal,exponential
  distribution-evaluator data.csv --column value --top 3 --plot --output results.json
  distribution-evaluator data.csv --column score --threshold 5.5 --method aic --verbose
        """
    )
    parser.add_argument(
        'input_file',
        help='输入数据文件 (支持 .txt, .csv, .xlsx 格式)'
    )
    parser.add_argument(
        '--column', '-c',
        help='CSV/Excel文件中要分析的列名 (如果不指定，将使用第一列数值数据)'
    )
    parser.add_argument(
        '--distributions', '-d',
        default='normal,lognormal,exponential,gamma,beta,weibull_min,chi2,poisson,nbinom',
        help='要测试的分布类型，用逗号分隔 (默认: normal,lognormal,exponential,gamma,beta,weibull_min,chi2,poisson,nbinom)'
    )
    parser.add_argument(
        '--top', '-t',
        type=int,
        default=5,
        help='显示前N个最佳分布 (默认: 5)'
    )
    parser.add_argument(
        '--output', '-o',
        help='输出结果到JSON文件'
    )
    parser.add_argument(
        '--plot', '-p',
        action='store_true',
        help='生成分布拟合图'
    )
    parser.add_argument(
        '--plot-output',
        help='保存图片的文件名 (默认: distribution_fit.png)'
    )
    parser.add_argument(
        '--method',
        choices=['ks', 'ad', 'aic', 'bic'],
        default='ks',
        help='评估方法: ks(Kolmogorov-Smirnov), ad(Anderson-Darling), aic(Akaike), bic(Bayesian) (默认: ks)'
    )
    parser.add_argument(
        '--threshold',
        type=float,
        help='数据过滤阈值，只保留大于等于此阈值的数据进行分析'
    )
    parser.add_argument(
        '--verbose', '-v',
        action='store_true',
        help='显示详细信息'
    )
    return parser

def load_data(file_path: str, column: Optional[str] = None, threshold: Optional[float] = None) -> Tuple[np.ndarray, int]:
    """
    Load numeric data from a file.

    Supported formats: .csv, .xlsx/.xls (via pandas), .txt (whitespace-separated
    numbers) and .json (a list, or an object whose value under ``column`` is a list).

    Args:
        file_path: path to the data file.
        column: column name (CSV/Excel) or key (JSON object) to analyze;
            for CSV/Excel, defaults to the first numeric column.
        threshold: if given, keep only values >= threshold.

    Returns:
        tuple: (filtered numpy float array, original number of data points).

    Raises:
        ValueError: on unsupported format, missing column/key, or empty data.
    """
    if file_path.endswith(('.csv', '.xlsx', '.xls')):
        # .xlsx was advertised in the CLI help but previously unsupported;
        # Excel files go through pandas just like CSV.
        df = pd.read_csv(file_path) if file_path.endswith('.csv') else pd.read_excel(file_path)
        if column:
            if column not in df.columns:
                raise ValueError(f"列 '{column}' 不存在于CSV文件中")
            data = df[column].dropna().values
        else:
            # No column specified: fall back to the first numeric column.
            numeric_columns = df.select_dtypes(include=[np.number]).columns
            if len(numeric_columns) == 0:
                raise ValueError("CSV文件中没有找到数值列")
            data = df[numeric_columns[0]].dropna().values
    elif file_path.endswith('.txt'):
        # np.loadtxt returns a 0-d array for a single value, which would
        # break len()/boolean indexing below — force at least 1-d.
        data = np.atleast_1d(np.loadtxt(file_path))
    elif file_path.endswith('.json'):
        with open(file_path, 'r', encoding='utf-8') as f:
            json_data = json.load(f)
        if isinstance(json_data, list):
            data = np.array(json_data)
        elif isinstance(json_data, dict) and column:
            if column not in json_data:
                raise ValueError(f"键 '{column}' 不存在于JSON文件中")
            data = np.array(json_data[column])
        else:
            raise ValueError("JSON文件格式不支持或未指定列名")
    else:
        raise ValueError("不支持的文件格式。支持的格式: .csv, .xlsx, .txt, .json")

    # Ensure float dtype for downstream statistics.
    data = data.astype(float)

    if len(data) == 0:
        raise ValueError("数据为空")

    # Count before filtering so callers can report retention.
    original_count = len(data)

    # Apply the optional threshold filter.
    if threshold is not None:
        data = data[data >= threshold]
        if len(data) == 0:
            raise ValueError(f"应用阈值 {threshold} 后没有剩余数据")

    return data, original_count


def get_distribution_info() -> Dict[str, Dict[str, Any]]:
    """
    Return the registry of supported distributions.

    Each entry maps a user-facing key to: display name, the scipy.stats
    distribution name, the ordered scipy parameter names, the human-readable
    parameter labels (used when printing/saving results), and whether the
    distribution is continuous (drives the choice of goodness-of-fit test).

    Returns:
        Dict of distribution metadata keyed by distribution name.
    """
    # NOTE: the previous version re-imported scipy.stats here; the module is
    # not used in this function, so the redundant local import was removed.
    return {
        'normal': {
            'name': 'Normal',
            'scipy_name': 'norm',
            'params': ['loc', 'scale'],
            'param_names': ['均值', '标准差'],
            'continuous': True
        },
        'lognormal': {
            'name': 'Log-Normal',
            'scipy_name': 'lognorm',
            'params': ['s', 'loc', 'scale'],
            'param_names': ['形状参数', '位置参数', '尺度参数'],
            'continuous': True
        },
        'exponential': {
            'name': 'Exponential',
            'scipy_name': 'expon',
            'params': ['loc', 'scale'],
            'param_names': ['位置参数', '尺度参数'],
            'continuous': True
        },
        'gamma': {
            'name': 'Gamma',
            'scipy_name': 'gamma',
            'params': ['a', 'loc', 'scale'],
            'param_names': ['形状参数', '位置参数', '尺度参数'],
            'continuous': True
        },
        'beta': {
            'name': 'Beta',
            'scipy_name': 'beta',
            'params': ['a', 'b', 'loc', 'scale'],
            'param_names': ['α参数', 'β参数', '位置参数', '尺度参数'],
            'continuous': True
        },
        'weibull_min': {
            'name': 'Weibull',
            'scipy_name': 'weibull_min',
            'params': ['c', 'loc', 'scale'],
            'param_names': ['形状参数', '位置参数', '尺度参数'],
            'continuous': True
        },
        'chi2': {
            'name': 'Chi-Square',
            'scipy_name': 'chi2',
            'params': ['df', 'loc', 'scale'],
            'param_names': ['自由度', '位置参数', '尺度参数'],
            'continuous': True
        },
        'poisson': {
            'name': 'Poisson',
            'scipy_name': 'poisson',
            'params': ['mu', 'loc'],
            'param_names': ['λ参数', '位置参数'],
            'continuous': False
        },
        'nbinom': {
            'name': 'Negative Binomial',
            'scipy_name': 'nbinom',
            'params': ['n', 'p', 'loc'],
            'param_names': ['试验次数', '成功概率', '位置参数'],
            'continuous': False
        }
    }


def fit_distribution(data: np.ndarray, dist_name: str, verbose: bool = False) -> Dict[str, Any]:
    """
    Fit a single distribution to the data and score the goodness of fit.

    Args:
        data: 1-D data array.
        dist_name: distribution key (must exist in get_distribution_info()).
        verbose: whether to print a one-line fit summary.

    Returns:
        On success, a dict with the fitted parameters plus fit metrics
        ('ks_statistic', 'ks_pvalue', 'ad_statistic', 'ad_pvalue', 'aic',
        'bic', 'log_likelihood'). On failure (data unsuitable for the
        distribution), a dict with success=False and an 'error' message.

    Raises:
        ValueError: if dist_name is not a supported distribution.
    """
    dist_info = get_distribution_info()
    
    if dist_name not in dist_info:
        raise ValueError(f"不支持的分布: {dist_name}")
    
    info = dist_info[dist_name]
    scipy_dist = getattr(stats, info['scipy_name'])
    
    # --- Parameter estimation ---
    if dist_name == 'poisson':
        # Poisson requires non-negative integer data.
        if np.any(data < 0) or np.any(data != np.round(data)):
            return {
                'distribution': dist_name,
                'name': info['name'],
                'success': False,
                'error': '泊松分布要求非负整数数据'
            }
        # Poisson MLE: λ = sample mean.
        mu = np.mean(data)
        params = (mu, 0)  # (mu, loc)
    elif dist_name == 'nbinom':
        # Negative binomial also requires non-negative integer data.
        if np.any(data < 0) or np.any(data != np.round(data)):
            return {
                'distribution': dist_name,
                'name': info['name'],
                'success': False,
                'error': '负二项分布要求非负整数数据'
            }
        # Method-of-moments estimation for the negative binomial.
        data_int = data.astype(int)
        sample_mean = np.mean(data_int)
        sample_var = np.var(data_int)
        
        if sample_var <= sample_mean:
            # Variance <= mean means no overdispersion; Poisson fits better,
            # so the negative binomial fit is rejected outright.
            return {
                'distribution': dist_name,
                'name': info['name'],
                'success': False,
                'error': '数据方差小于等于均值，不适合负二项分布'
            }
        
        # Moment estimators: p = mean/var, n = mean*p/(1-p).
        p = sample_mean / sample_var
        n = sample_mean * p / (1 - p)
        params = (n, p, 0)  # (n, p, loc)
    else:
        # Continuous distributions: scipy's built-in MLE fit.
        params = scipy_dist.fit(data)
    
    # --- Goodness-of-fit statistics ---
    if info['continuous']:
        # Continuous: Kolmogorov-Smirnov test against the fitted CDF.
        ks_stat, ks_pvalue = stats.kstest(data, lambda x: scipy_dist.cdf(x, *params))
        
        # Anderson-Darling test (only computed for the normal distribution here).
        ad_stat, ad_pvalue = None, None
        if dist_name == 'normal':
            ad_result = stats.anderson(data, dist='norm')
            ad_stat = ad_result.statistic
            # NOTE(review): ad-hoc p-value surrogate, not a calibrated
            # significance level — useful only for relative ranking.
            ad_pvalue = 1.0 / (1.0 + ad_stat)
    else:
        # Discrete: simplified chi-square goodness-of-fit test.
        # NOTE(review): the chi-square statistic/p-value are stored under the
        # 'ks_statistic'/'ks_pvalue' keys, so these values are not directly
        # comparable with the KS statistics of continuous candidates.
        data_int = data.astype(int)
        unique_values = np.unique(data_int)
        
        # Observed frequency of each distinct value.
        observed_freq = np.array([np.sum(data_int == val) for val in unique_values])
        
        # Expected frequency under the fitted PMF.
        expected_freq = np.array([len(data) * scipy_dist.pmf(val, *params) for val in unique_values])
        
        # Anderson-Darling does not apply to discrete distributions.
        ad_stat, ad_pvalue = None, None
        
        # Drop cells whose expected count is too small for a chi-square test.
        mask = expected_freq >= 1  # relaxed threshold
        if np.sum(mask) < 3:  # need at least 3 usable cells
            ks_stat, ks_pvalue = np.inf, 0.0
        else:
            # Keep only the usable cells.
            observed_masked = observed_freq[mask]
            expected_masked = expected_freq[mask]
            
            # Rescale expected counts so both sides share the same total,
            # as stats.chisquare requires.
            expected_masked = expected_masked * np.sum(observed_masked) / np.sum(expected_masked)
            
            try:
                chi2_stat, ks_pvalue = stats.chisquare(observed_masked, expected_masked)
                ks_stat = chi2_stat
            except ValueError:
                # Fallback: normalized absolute deviation with a KS-style
                # exponential p-value approximation.
                ks_stat = np.sum(np.abs(observed_masked - expected_masked)) / np.sum(observed_masked)
                ks_pvalue = np.exp(-2 * len(data) * ks_stat**2)
    
    # --- Information criteria from the fitted log-likelihood ---
    log_likelihood = np.sum(scipy_dist.logpdf(data, *params)) if info['continuous'] else np.sum(scipy_dist.logpmf(data.astype(int), *params))
    n_params = len(params)
    n_data = len(data)
    
    aic = 2 * n_params - 2 * log_likelihood
    bic = n_params * np.log(n_data) - 2 * log_likelihood
    
    result = {
        'distribution': dist_name,
        'name': info['name'],
        'success': True,
        'parameters': dict(zip(info['param_names'], params)),
        'raw_parameters': params,
        'ks_statistic': ks_stat,
        'ks_pvalue': ks_pvalue,
        'ad_statistic': ad_stat,
        'ad_pvalue': ad_pvalue,
        'aic': aic,
        'bic': bic,
        'log_likelihood': log_likelihood,
        'continuous': info['continuous']
    }
    
    if verbose:
        print(f"  {info['name']}: KS={ks_stat:.4f} (p={ks_pvalue:.4f}), AIC={aic:.2f}, BIC={bic:.2f}")
    
    return result


def evaluate_distributions(data: np.ndarray, distributions: List[str], method: str = 'ks', verbose: bool = False) -> List[Dict[str, Any]]:
    """
    Fit several candidate distributions and rank them by goodness of fit.

    Args:
        data: 1-D data array.
        distributions: distribution keys to try.
        method: ranking criterion ('ks', 'ad', 'aic' or 'bic'; lower is better).
        verbose: whether to print per-distribution progress.

    Returns:
        List of successful fit results, best first.
    """
    if verbose:
        print(f"正在评估 {len(distributions)} 个分布模型...")

    fitted = []
    for candidate in distributions:
        if verbose:
            print(f"拟合 {candidate}...")

        outcome = fit_distribution(data, candidate, verbose)
        if outcome['success']:
            fitted.append(outcome)
        elif verbose:
            print(f"  {outcome['name']}: 拟合失败 - {outcome.get('error', '未知错误')}")

    # Every criterion is "smaller is better"; the AD statistic may be absent
    # (None), in which case the result sorts last.
    sort_keys = {
        'ks': lambda r: r['ks_statistic'],
        'ad': lambda r: np.inf if r['ad_statistic'] is None else r['ad_statistic'],
        'aic': lambda r: r['aic'],
        'bic': lambda r: r['bic'],
    }
    key_fn = sort_keys.get(method)
    if key_fn is not None:
        fitted.sort(key=key_fn)

    return fitted


def print_results(results: List[Dict[str, Any]], top_n: int = 5):
    """
    Pretty-print the ranked evaluation results to stdout.

    Args:
        results: ranked fit results (best first).
        top_n: maximum number of results to display.
    """
    rule = '=' * 60
    print(f"\n{rule}")
    print("数据分布模型评估结果")
    print(rule)

    if not results:
        print("没有成功拟合的分布模型")
        return

    shown = min(top_n, len(results))
    print(f"显示前 {shown} 个最佳拟合分布:\n")

    for rank, entry in enumerate(results[:top_n], start=1):
        print(f"{rank}. {entry['name']} ({entry['distribution']})")
        print(f"   KS统计量: {entry['ks_statistic']:.6f} (p值: {entry['ks_pvalue']:.6f})")
        # AD statistics exist only for distributions that support the test.
        if entry['ad_statistic'] is not None:
            print(f"   AD统计量: {entry['ad_statistic']:.6f} (p值: {entry['ad_pvalue']:.6f})")
        print(f"   AIC: {entry['aic']:.2f}, BIC: {entry['bic']:.2f}")
        print(f"   对数似然: {entry['log_likelihood']:.2f}")

        print("   参数:")
        for label, value in entry['parameters'].items():
            print(f"     {label}: {value:.6f}")
        print()

def save_results(results: List[Dict[str, Any]], output_file: str, data: np.ndarray, original_count: int, args):
    """
    Save evaluation results and run metadata to a JSON file.

    Args:
        results: ranked fit results from evaluate_distributions().
        output_file: destination JSON path.
        data: filtered data array that was analyzed.
        original_count: number of data points before threshold filtering.
        args: parsed CLI namespace (input_file, column, method, threshold, ...).
    """

    def _to_native(value):
        # Recursively convert numpy scalars/arrays and nested containers into
        # JSON-serializable Python types. The previous flat type-switch left
        # tuples (e.g. 'raw_parameters' holding numpy floats) untouched, which
        # made json.dump raise TypeError on every save.
        if isinstance(value, np.ndarray):
            return value.tolist()
        if isinstance(value, np.integer):
            return int(value)
        if isinstance(value, np.floating):
            return float(value)
        if isinstance(value, dict):
            return {k: _to_native(v) for k, v in value.items()}
        if isinstance(value, (list, tuple)):
            return [_to_native(v) for v in value]
        return value

    output_data = {
        'metadata': {
            'input_file': args.input_file,
            'column': args.column,
            'method': args.method,
            'threshold': args.threshold,
            'original_data_points': original_count,
            'filtered_data_points': len(data),
            'data_points': len(data),
            'data_mean': float(data.mean()),
            'data_std': float(data.std()),
            'data_min': float(data.min()),
            'data_max': float(data.max())
        },
        'results': [_to_native(result) for result in results]
    }

    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(output_data, f, ensure_ascii=False, indent=2)


def create_plots(data: np.ndarray, results: List[Dict[str, Any]], output_file: str, top_n: int = 3):
    """
    Create a 2x2 figure of distribution-fit diagnostics and save it as a PNG.

    Panels: (1) histogram of the data, (2) histogram overlaid with the PDFs of
    the top fitted continuous distributions, (3) Q-Q plot against the best fit
    (continuous only), (4) bar chart comparing KS statistics.

    Args:
        data: data array that was analyzed.
        results: ranked fit results from evaluate_distributions().
        output_file: path the figure is written to (300 dpi).
        top_n: number of top distributions to overlay/compare.
    """
    # Configure fonts so CJK characters render; SimHei availability is
    # platform-dependent — DejaVu Sans is the fallback.
    plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
    plt.rcParams['axes.unicode_minus'] = False
    
    fig, axes = plt.subplots(2, 2, figsize=(15, 12))
    fig.suptitle('Distribution Model Fitting Results', fontsize=16, fontweight='bold')
    
    # Panel 1: histogram of the raw data (density-normalized).
    ax1 = axes[0, 0]
    ax1.hist(data, bins=30, density=True, alpha=0.7, color='lightblue', edgecolor='black')
    ax1.set_title('Original Data Distribution', fontweight='bold')
    ax1.set_xlabel('Value')
    ax1.set_ylabel('Density')
    ax1.grid(True, alpha=0.3)
    
    # Panel 2: histogram overlaid with fitted PDF curves. Only continuous
    # fits are drawn — discrete distributions have no PDF curve here.
    ax2 = axes[0, 1]
    ax2.hist(data, bins=30, density=True, alpha=0.5, color='lightgray', label='Original Data')
    
    colors = ['red', 'blue', 'green', 'orange', 'purple']
    x_range = np.linspace(data.min(), data.max(), 1000)
    
    for i, result in enumerate(results[:min(top_n, len(colors))]):
        if result['success']:
            dist_info = get_distribution_info()[result['distribution']]
            scipy_dist = getattr(stats, dist_info['scipy_name'])
            
            if result['continuous']:
                y_fit = scipy_dist.pdf(x_range, *result['raw_parameters'])
                ax2.plot(x_range, y_fit, color=colors[i], linewidth=2, 
                        label=f"{result['name']} (KS={result['ks_statistic']:.4f})")
    
    ax2.set_title('Best Fitting Distributions Comparison', fontweight='bold')
    ax2.set_xlabel('Value')
    ax2.set_ylabel('Density')
    ax2.legend()
    ax2.grid(True, alpha=0.3)
    
    # Panel 3: Q-Q plot against the best-ranked (continuous) distribution.
    ax3 = axes[1, 0]
    if results and results[0]['success']:
        best_result = results[0]
        dist_info = get_distribution_info()[best_result['distribution']]
        scipy_dist = getattr(stats, dist_info['scipy_name'])
        
        if best_result['continuous']:
            stats.probplot(data, dist=scipy_dist, sparams=best_result['raw_parameters'], plot=ax3)
            ax3.set_title(f'Q-Q Plot - {best_result["name"]}', fontweight='bold')
            ax3.grid(True, alpha=0.3)
    
    # Panel 4: bar chart of KS statistics for the top distributions
    # (for discrete fits this value is actually a chi-square statistic).
    ax4 = axes[1, 1]
    if results:
        dist_names = [r['name'][:10] + '...' if len(r['name']) > 10 else r['name'] 
                     for r in results[:top_n]]
        ks_stats = [r['ks_statistic'] for r in results[:top_n]]
        
        bars = ax4.bar(range(len(dist_names)), ks_stats, color=colors[:len(dist_names)])
        ax4.set_title('KS Statistic Comparison (Lower is Better)', fontweight='bold')
        ax4.set_xlabel('Distribution Type')
        ax4.set_ylabel('KS Statistic')
        ax4.set_xticks(range(len(dist_names)))
        ax4.set_xticklabels(dist_names, rotation=45, ha='right')
        ax4.grid(True, alpha=0.3)
        
        # Annotate each bar with its numeric value.
        for bar, stat in zip(bars, ks_stats):
            height = bar.get_height()
            ax4.text(bar.get_x() + bar.get_width()/2., height + height*0.01,
                    f'{stat:.4f}', ha='center', va='bottom', fontsize=9)
    
    plt.tight_layout()
    plt.savefig(output_file, dpi=300, bbox_inches='tight')
    plt.close()


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()