#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
BioParam_Sampler: 宏基因组数据分析流程实验配置生成器

基于拉丁超立方抽样(LHS)生成宏基因组分析流程的实验配置清单。
支持多阶段串联流程的参数抽样，包括模拟数据源、质量控制、去宿主、病原鉴定等环节。

作者: Ginsea Chen
版本: 1.0.0.20250629
日期: 2025-06-29
"""

import json
import pandas as pd
import numpy as np
from scipy.stats import qmc
import argparse
import logging
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Tuple
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import rcParams
import warnings
warnings.filterwarnings('ignore')


class BioParamSampler:
    """
    Metagenomic parameter sampler.

    Uses Latin Hypercube Sampling (LHS) to draw experiment configurations
    from a multi-dimensional parameter space. Supports mixed categorical and
    numerical features, plus per-stage software selection for a multi-stage
    analysis pipeline (simulated data source, QC, host removal, pathogen
    identification, ...).
    """

    def __init__(self, config_file: str):
        """
        Initialize the sampler.

        Args:
            config_file: Path to the JSON configuration file.
        """
        self.config = self._load_config(config_file)
        self.logger = self._setup_logger()

        # Top-level settings parsed from the configuration.
        self.total_experiments = self.config['total_experiments_N']
        self.random_seed = self.config['random_seed']
        self.features = self.config['features']

        # Unified feature space, populated by _build_feature_space():
        #   feature_space[i]  -> number of discrete levels of feature i
        #   feature_names[i]  -> output column name of feature i
        #   feature_mappings  -> per-feature metadata (type + candidate values)
        self.feature_space = []
        self.feature_names = []
        self.feature_mappings = {}

    def _load_config(self, config_file: str) -> Dict[str, Any]:
        """
        Load and parse the JSON configuration file.

        Raises:
            ValueError: if the file cannot be read or parsed.
        """
        try:
            with open(config_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise ValueError(f"配置文件加载失败: {e}") from e

    def _setup_logger(self) -> logging.Logger:
        """Create (or reuse) the stream logger for this class."""
        logger = logging.getLogger('BioParamSampler')
        logger.setLevel(logging.INFO)

        # Attach the handler only once, even if several samplers are created.
        if not logger.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            ))
            logger.addHandler(handler)

        return logger

    def _build_feature_space(self):
        """
        Build the unified feature space.

        Collects all features (simulated data source, per-stage common
        parameters, software-specific parameters) into one flat feature list
        and records their value mappings. A synthetic categorical feature
        ``pipeline_<stage>_tool`` is added per stage so that the software
        choice itself is sampled.
        """
        self.logger.info("开始构建特征空间...")

        # 1. Simulated data-source features.
        for feature in self.features['sim_data_source']:
            self._add_feature(feature)

        # 2. Pipeline stages.
        for stage in self.features['pipeline_stages']:
            stage_name = stage['stage_name']

            # Software-selection feature for this stage.
            self._add_feature({
                'name': f'pipeline_{stage_name}_tool',
                'type': 'categorical',
                'values': [opt['software_name'] for opt in stage['software_options']],
            })

            # Stage-wide common parameters (optional key).
            for param in stage.get('stage_common_params', []):
                self._add_feature(param)

            # Parameters of every software option are sampled up front;
            # unselected ones are masked later by _activate_conditional_params().
            for software in stage['software_options']:
                for param in software['params']:
                    self._add_feature(param)

        self.logger.info(f"特征空间构建完成，共 {len(self.feature_names)} 个特征")

    def _add_feature(self, feature: Dict[str, Any]):
        """
        Register a single feature in the feature space.

        Args:
            feature: Feature definition with 'name', 'type' and 'values'.
        """
        name = feature['name']
        values = feature['values']

        self.feature_names.append(name)
        # Both categorical and numerical features are discretized to their
        # candidate-value list, so the LHS dimension size is len(values).
        self.feature_space.append(len(values))

        if feature['type'] == 'categorical':
            self.feature_mappings[name] = {
                'type': 'categorical',
                'values': values,
                'mapping': dict(enumerate(values)),
            }
        else:  # numerical
            self.feature_mappings[name] = {
                'type': 'numerical',
                'values': values,
            }

    def _generate_lhs_samples(self) -> np.ndarray:
        """
        Generate LHS samples as integer level indices.

        Returns:
            Integer matrix of shape (n_samples, n_features); entry (j, i)
            is the selected level index of feature i in experiment j.
        """
        self.logger.info(f"开始LHS抽样，样本数: {self.total_experiments}")

        sampler = qmc.LatinHypercube(
            d=len(self.feature_space),
            seed=self.random_seed
        )

        # Uniform samples in [0, 1).
        unit_samples = sampler.random(n=self.total_experiments)

        # Scale each column to integer indices in [0, dim_size - 1]; the
        # clip guards against a sample landing exactly on 1.0.
        dims = np.asarray(self.feature_space)
        samples = np.clip((unit_samples * dims).astype(int), 0, dims - 1)

        self.logger.info("LHS抽样完成")
        return samples

    def _map_samples_to_values(self, samples: np.ndarray) -> pd.DataFrame:
        """
        Map LHS level indices to actual parameter values.

        Args:
            samples: Integer LHS sample matrix from _generate_lhs_samples().

        Returns:
            DataFrame with one column per possible feature; parameters of
            unselected software are set to 'N/A'.
        """
        self.logger.info("开始映射样本到实际参数值...")

        # Start every possible column as all-'N/A'; sampled features are
        # overwritten below.
        result_data = {
            feature_name: ['N/A'] * self.total_experiments
            for feature_name in self._get_all_possible_features()
        }

        for i, feature_name in enumerate(self.feature_names):
            # For categorical features the index->value 'mapping' dict is
            # identical to positional lookup in the values list, so both
            # feature types share one code path.
            lookup = self.feature_mappings[feature_name]['values']
            result_data[feature_name] = [lookup[idx] for idx in samples[:, i]]

        # Mask parameters that belong to software not selected in each row.
        self._activate_conditional_params(result_data)

        df = pd.DataFrame(result_data)
        self.logger.info("参数值映射完成")
        return df

    def _get_all_possible_features(self) -> List[str]:
        """
        List every feature name that can appear as an output column.

        Returns:
            Feature names in definition order: data-source features, then per
            stage the tool selector, common params, and every software's params.
        """
        all_features = [f['name'] for f in self.features['sim_data_source']]

        for stage in self.features['pipeline_stages']:
            all_features.append(f"pipeline_{stage['stage_name']}_tool")

            for param in stage.get('stage_common_params', []):
                all_features.append(param['name'])

            for software in stage['software_options']:
                for param in software['params']:
                    all_features.append(param['name'])

        return all_features

    def _activate_conditional_params(self, result_data: Dict[str, List]):
        """
        Mask the parameters of unselected software with 'N/A'.

        For every stage and experiment, parameters belonging exclusively to
        software options that were *not* selected are reset to 'N/A'.
        Parameter names shared with the selected software stay active
        (previously such shared names were clobbered to 'N/A'). The dead
        "activation" loop of the original implementation — which only
        re-checked values and did nothing — has been removed.

        Args:
            result_data: Column-name -> per-experiment value list, mutated
                in place.
        """
        for stage in self.features['pipeline_stages']:
            software_col = f"pipeline_{stage['stage_name']}_tool"

            # Hoist the per-software parameter-name lists out of the
            # per-experiment loop (invariant across experiments).
            params_by_software = {
                sw['software_name']: [p['name'] for p in sw['params']]
                for sw in stage['software_options']
            }

            for exp_idx in range(self.total_experiments):
                selected = result_data[software_col][exp_idx]
                # Parameter names of the selected tool must remain active.
                active = set(params_by_software.get(selected, ()))

                for sw_name, param_names in params_by_software.items():
                    if sw_name == selected:
                        continue
                    for param_name in param_names:
                        if param_name not in active:
                            result_data[param_name][exp_idx] = 'N/A'

    def generate_experiments(self) -> pd.DataFrame:
        """
        Generate the full experiment-configuration table.

        Returns:
            DataFrame with one row per experiment and one column per feature.
        """
        self._build_feature_space()
        samples = self._generate_lhs_samples()
        experiments_df = self._map_samples_to_values(samples)
        self._validate_experiments(experiments_df)
        return experiments_df

    def _validate_experiments(self, df: pd.DataFrame):
        """
        Sanity-check the generated configurations.

        Args:
            df: Experiment-configuration DataFrame.

        Raises:
            ValueError: if a required tool-selection column is missing.
        """
        self.logger.info("开始验证实验配置...")

        # Duplicate rows are possible with discrete levels, but worth flagging.
        duplicates = df.duplicated().sum()
        if duplicates > 0:
            self.logger.warning(f"发现 {duplicates} 行重复配置")

        required_cols = [f'pipeline_{stage["stage_name"]}_tool'
                         for stage in self.features['pipeline_stages']]
        missing_cols = [col for col in required_cols if col not in df.columns]
        if missing_cols:
            raise ValueError(f"缺少必要的列: {missing_cols}")

        self.logger.info("实验配置验证通过")

    def save_experiments(self, df: pd.DataFrame, output_dir: str = ".") -> str:
        """
        Save the experiment configurations to a CSV file.

        Args:
            df: Experiment-configuration DataFrame.
            output_dir: Output directory; the file name embeds the current
                date and the experiment count.

        Returns:
            Path of the written CSV file as a string.
        """
        timestamp = datetime.now().strftime("%Y%m%d")
        filename = f"experiment_configurations_LHS_{timestamp}_{self.total_experiments}.csv"
        filepath = Path(output_dir) / filename

        df.to_csv(filepath, index=False, encoding='utf-8')

        self.logger.info(f"实验配置已保存到: {filepath}")
        self.logger.info(f"总实验数: {len(df)}")
        self.logger.info(f"总特征数: {len(df.columns)}")

        return str(filepath)


class BioParamVisualizer:
    """
    Visualizer for BioParam_Sampler results.

    Renders software-selection distributions, numerical histograms, a
    correlation heatmap, LHS-quality scatter plots, categorical statistics,
    and a parameter-coverage chart into a single multi-page PDF report.
    """

    def __init__(self, experiments_df: pd.DataFrame, config: Dict[str, Any]):
        """
        Initialize the visualizer.

        Args:
            experiments_df: Experiment-configuration DataFrame.
            config: Sampler configuration dictionary (kept for reference).
        """
        self.df = experiments_df
        self.config = config
        self.logger = logging.getLogger('BioParamVisualizer')

        # ASCII-safe font fallbacks; keep minus signs renderable.
        rcParams['font.sans-serif'] = ['DejaVu Sans', 'Liberation Sans', 'Arial', 'sans-serif']
        rcParams['axes.unicode_minus'] = False

        # Seaborn styling for every subsequent plot.
        sns.set_style("whitegrid")
        sns.set_palette("husl")

    @staticmethod
    def _flat_axes(axes) -> np.ndarray:
        """
        Normalize plt.subplots() output to a flat 1-D array of Axes.

        plt.subplots() returns a bare Axes for a 1x1 grid, a 1-D array for a
        single row/column, and a 2-D array otherwise; this helper makes the
        calling code uniform (replaces the triplicated if/elif branching).
        """
        return np.atleast_1d(axes).ravel()

    def _numeric_param_columns(self, limit=None):
        """
        Return 'sim_'/'param_' columns whose non-'N/A' values parse as numbers.

        Args:
            limit: Optional maximum number of columns to return (None = all).

        Returns:
            List of column names in DataFrame order.
        """
        cols = []
        for col in self.df.columns:
            if not col.startswith(('sim_', 'param_')):
                continue
            non_na = self.df[col][self.df[col] != 'N/A']
            if len(non_na) == 0:
                continue
            try:
                pd.to_numeric(non_na.iloc[:10])  # probe the first 10 values
            except (ValueError, TypeError):
                continue
            cols.append(col)
            if limit is not None and len(cols) >= limit:
                break
        return cols

    def create_visualization_report(self, output_dir: str = ".") -> str:
        """
        Create the complete visualization report.

        Args:
            output_dir: Output directory for the PDF.

        Returns:
            Path of the generated PDF report as a string.
        """
        timestamp = datetime.now().strftime("%Y%m%d")
        pdf_filename = f"bioparam_visualization_report_{timestamp}.pdf"
        pdf_path = Path(output_dir) / pdf_filename

        self.logger.info(f"开始生成可视化报告: {pdf_path}")

        with PdfPages(pdf_path) as pdf:
            # 1. Software-selection distribution.
            self._plot_software_distribution(pdf)

            # 2. Numerical parameter distributions.
            self._plot_numerical_distributions(pdf)

            # 3. Parameter correlation heatmap.
            self._plot_correlation_heatmap(pdf)

            # 4. LHS quality assessment.
            self._plot_lhs_quality(pdf)

            # 5. Categorical parameter statistics.
            self._plot_categorical_statistics(pdf)

            # 6. Parameter coverage analysis.
            self._plot_parameter_coverage(pdf)

        self.logger.info(f"可视化报告已保存: {pdf_path}")
        return str(pdf_path)

    def _plot_software_distribution(self, pdf: PdfPages):
        """Plot per-stage software-selection distributions as pie charts."""
        software_cols = [col for col in self.df.columns
                         if col.startswith('pipeline_') and col.endswith('_tool')]

        if not software_cols:
            return

        n_cols = min(3, len(software_cols))
        n_rows = (len(software_cols) + n_cols - 1) // n_cols

        fig, axes = plt.subplots(n_rows, n_cols, figsize=(15, 5 * n_rows))
        axes = self._flat_axes(axes)

        for i, col in enumerate(software_cols):
            ax = axes[i]
            value_counts = self.df[col].value_counts()

            wedges, texts, autotexts = ax.pie(
                value_counts.values,
                labels=value_counts.index,
                autopct='%1.1f%%',
                startangle=90
            )

            stage_name = col.replace('pipeline_', '').replace('_tool', '')
            ax.set_title(f'{stage_name.title()} Tool Distribution', fontsize=12, fontweight='bold')

            # White bold percentage labels for readability on colored wedges.
            for autotext in autotexts:
                autotext.set_color('white')
                autotext.set_fontweight('bold')

        # Hide subplots beyond the number of stages.
        for i in range(len(software_cols), len(axes)):
            axes[i].set_visible(False)

        plt.suptitle('Software Selection Distribution Statistics', fontsize=16, fontweight='bold')
        plt.tight_layout()
        pdf.savefig(fig, bbox_inches='tight')
        plt.close(fig)

    def _plot_numerical_distributions(self, pdf: PdfPages):
        """Plot histograms of (up to 12) numerical parameters."""
        numerical_cols = self._numeric_param_columns(limit=12)

        if not numerical_cols:
            return

        n_cols = 3
        n_rows = (len(numerical_cols) + n_cols - 1) // n_cols

        fig, axes = plt.subplots(n_rows, n_cols, figsize=(15, 4 * n_rows))
        axes = self._flat_axes(axes)

        for i, col in enumerate(numerical_cols):
            ax = axes[i]

            # Only the rows where this parameter is active.
            data = self.df[col][self.df[col] != 'N/A']
            if len(data) == 0:
                continue

            try:
                numeric_data = pd.to_numeric(data)
            except (ValueError, TypeError):
                continue

            ax.hist(numeric_data, bins=20, alpha=0.7, edgecolor='black')
            ax.set_title(col.replace('_', ' ').title(), fontsize=10)
            ax.set_xlabel('Value')
            ax.set_ylabel('Frequency')

            # Mark the sample mean for quick reference.
            mean_val = numeric_data.mean()
            ax.axvline(mean_val, color='red', linestyle='--', alpha=0.8,
                       label=f'Mean: {mean_val:.2f}')
            ax.legend()

        # Hide unused subplots.
        for i in range(len(numerical_cols), len(axes)):
            axes[i].set_visible(False)

        plt.suptitle('Numerical Parameter Distributions', fontsize=16, fontweight='bold')
        plt.tight_layout()
        pdf.savefig(fig, bbox_inches='tight')
        plt.close(fig)

    def _plot_correlation_heatmap(self, pdf: PdfPages):
        """Plot a lower-triangle correlation heatmap of numerical parameters."""
        numerical_data = pd.DataFrame()

        for col in self.df.columns:
            if col.startswith(('sim_', 'param_')):
                non_na_values = self.df[col][self.df[col] != 'N/A']
                if len(non_na_values) > 0:
                    try:
                        numeric_series = pd.to_numeric(non_na_values)
                        if len(numeric_series.dropna()) > 10:  # need at least 10 valid values
                            # Full-length series with NaN where the parameter
                            # was inactive, so pairwise corr aligns on index.
                            full_series = pd.Series(index=self.df.index, dtype=float)
                            full_series.loc[non_na_values.index] = numeric_series
                            numerical_data[col] = full_series
                    except (ValueError, TypeError):
                        continue

        if numerical_data.empty or len(numerical_data.columns) < 2:
            self.logger.warning("数值参数不足，跳过相关性分析")
            return

        correlation_matrix = numerical_data.corr()

        fig, ax = plt.subplots(figsize=(12, 10))

        # Mask the redundant upper triangle.
        mask = np.triu(np.ones_like(correlation_matrix, dtype=bool))

        sns.heatmap(
            correlation_matrix,
            mask=mask,
            annot=True,
            cmap='coolwarm',
            center=0,
            square=True,
            fmt='.2f',
            cbar_kws={"shrink": .8},
            ax=ax
        )

        ax.set_title('Parameter Correlation Heatmap', fontsize=16, fontweight='bold')

        # Rotate labels for readability.
        plt.xticks(rotation=45, ha='right')
        plt.yticks(rotation=0)

        plt.tight_layout()
        pdf.savefig(fig, bbox_inches='tight')
        plt.close(fig)

    def _plot_lhs_quality(self, pdf: PdfPages):
        """Scatter-plot pairs of numerical parameters to assess LHS uniformity."""
        # Consider at most 4 numerical parameters (up to 4 scatter panels).
        numerical_cols = self._numeric_param_columns(limit=4)

        if len(numerical_cols) < 2:
            return

        fig, axes = plt.subplots(2, 2, figsize=(12, 10))
        axes = axes.flatten()

        plot_count = 0
        for i in range(len(numerical_cols)):
            if plot_count >= 4:
                break
            for j in range(i + 1, len(numerical_cols)):
                if plot_count >= 4:
                    break

                col1, col2 = numerical_cols[i], numerical_cols[j]

                data1 = self.df[col1][self.df[col1] != 'N/A']
                data2 = self.df[col2][self.df[col2] != 'N/A']

                # Only rows where both parameters are active are comparable.
                common_idx = data1.index.intersection(data2.index)
                if len(common_idx) < 10:
                    continue

                try:
                    x_data = pd.to_numeric(data1.loc[common_idx])
                    y_data = pd.to_numeric(data2.loc[common_idx])
                except (ValueError, TypeError):
                    continue

                ax = axes[plot_count]
                ax.scatter(x_data, y_data, alpha=0.6, s=20)
                ax.set_xlabel(col1.replace('_', ' ').title())
                ax.set_ylabel(col2.replace('_', ' ').title())
                ax.set_title(f'LHS Distribution: {col1.split("_")[-1]} vs {col2.split("_")[-1]}')

                plot_count += 1

        # Hide unused panels.
        for i in range(plot_count, 4):
            axes[i].set_visible(False)

        plt.suptitle('LHS Sampling Quality Assessment - Parameter Distribution Uniformity',
                     fontsize=16, fontweight='bold')
        plt.tight_layout()
        pdf.savefig(fig, bbox_inches='tight')
        plt.close(fig)

    def _plot_categorical_statistics(self, pdf: PdfPages):
        """Plot bar charts of (up to 6) categorical parameters."""
        categorical_cols = []
        for col in self.df.columns:
            if col.startswith(('sim_', 'param_')) and not col.endswith('_tool'):
                unique_values = self.df[col][self.df[col] != 'N/A'].unique()
                if 2 <= len(unique_values) <= 10:  # 2-10 distinct values
                    # Skip columns that parse as numbers (those are plotted
                    # as histograms elsewhere).
                    try:
                        pd.to_numeric(unique_values[:3])
                    except (ValueError, TypeError):
                        categorical_cols.append(col)

        if not categorical_cols:
            return

        # Cap the page size.
        categorical_cols = categorical_cols[:6]

        n_cols = 2
        n_rows = (len(categorical_cols) + n_cols - 1) // n_cols

        fig, axes = plt.subplots(n_rows, n_cols, figsize=(12, 4 * n_rows))
        axes = self._flat_axes(axes)

        for i, col in enumerate(categorical_cols):
            ax = axes[i]

            data = self.df[col][self.df[col] != 'N/A']
            value_counts = data.value_counts()

            bars = ax.bar(range(len(value_counts)), value_counts.values)
            ax.set_xticks(range(len(value_counts)))
            ax.set_xticklabels(value_counts.index, rotation=45, ha='right')
            ax.set_title(col.replace('_', ' ').title())
            ax.set_ylabel('Count')

            # Annotate each bar with its count.
            for bar, count in zip(bars, value_counts.values):
                ax.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.5,
                        str(count), ha='center', va='bottom')

        # Hide unused subplots.
        for i in range(len(categorical_cols), len(axes)):
            axes[i].set_visible(False)

        plt.suptitle('Categorical Parameter Statistics', fontsize=16, fontweight='bold')
        plt.tight_layout()
        pdf.savefig(fig, bbox_inches='tight')
        plt.close(fig)

    def _plot_parameter_coverage(self, pdf: PdfPages):
        """Plot the fraction of experiments in which each parameter is active."""
        coverage_data = []

        for col in self.df.columns:
            if col.startswith(('sim_', 'param_', 'pipeline_')):
                total_count = len(self.df)
                valid_count = len(self.df[self.df[col] != 'N/A'])
                coverage_ratio = valid_count / total_count

                coverage_data.append({
                    'parameter': col,
                    'coverage_ratio': coverage_ratio,
                    'valid_count': valid_count,
                    'total_count': total_count
                })

        if not coverage_data:
            return

        coverage_df = pd.DataFrame(coverage_data)
        coverage_df = coverage_df.sort_values('coverage_ratio', ascending=True)

        fig, ax = plt.subplots(figsize=(12, max(8, len(coverage_df) * 0.3)))

        bars = ax.barh(range(len(coverage_df)), coverage_df['coverage_ratio'])

        # Color-code: green for high coverage, red for low.
        colors = ['red' if ratio < 0.5 else 'orange' if ratio < 0.8 else 'green'
                  for ratio in coverage_df['coverage_ratio']]
        for bar, color in zip(bars, colors):
            bar.set_color(color)
            bar.set_alpha(0.7)

        ax.set_yticks(range(len(coverage_df)))
        ax.set_yticklabels([p.replace('_', ' ') for p in coverage_df['parameter']])
        ax.set_xlabel('Parameter Coverage Rate')
        ax.set_title('Parameter Activation Coverage Analysis', fontsize=16, fontweight='bold')
        ax.set_xlim(0, 1)

        # Percentage label at the end of each bar.
        for i, (bar, ratio) in enumerate(zip(bars, coverage_df['coverage_ratio'])):
            ax.text(bar.get_width() + 0.01, bar.get_y() + bar.get_height() / 2,
                    f'{ratio:.1%}', va='center', ha='left')

        # Legend explaining the color bands.
        from matplotlib.patches import Patch
        legend_elements = [
            Patch(facecolor='green', alpha=0.7, label='High Coverage (≥80%)'),
            Patch(facecolor='orange', alpha=0.7, label='Medium Coverage (50-80%)'),
            Patch(facecolor='red', alpha=0.7, label='Low Coverage (<50%)')
        ]
        ax.legend(handles=legend_elements, loc='lower right')

        plt.tight_layout()
        pdf.savefig(fig, bbox_inches='tight')
        plt.close(fig)


def main():
    """
    Command-line entry point.

    Parses arguments, generates the LHS experiment configurations, and
    optionally writes a CSV table and/or a PDF visualization report.

    Returns:
        int: Process exit code — 0 on success, 1 on any failure.
    """
    parser = argparse.ArgumentParser(
        description='BioParam_Sampler: 宏基因组参数抽样工具',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
示例用法:
  python bioparam_sampler.py -c example_bioparam_config.json -o ./output
  python bioparam_sampler.py --config config.json --output-dir results/
        """
    )

    parser.add_argument(
        '-c', '--config',
        required=True,
        help='配置文件路径 (JSON格式)'
    )

    parser.add_argument(
        '-o', '--output-dir',
        default='.',
        help='输出目录 (默认: 当前目录)'
    )

    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='详细输出模式'
    )

    parser.add_argument(
        '--visualize',
        action='store_true',
        help='生成可视化报告 (需要matplotlib和seaborn)'
    )

    parser.add_argument(
        '--no-csv',
        action='store_true',
        help='跳过CSV文件生成，仅生成可视化报告'
    )

    args = parser.parse_args()

    # Verbose mode lowers the sampler logger threshold to DEBUG.
    if args.verbose:
        logging.getLogger('BioParamSampler').setLevel(logging.DEBUG)

    try:
        sampler = BioParamSampler(args.config)
        experiments_df = sampler.generate_experiments()

        # Track outputs individually; the previous shared list made the
        # summary logic below depend on fragile positional indexing.
        csv_file = None
        pdf_file = None

        # Write the CSV unless explicitly skipped.
        if not args.no_csv:
            csv_file = sampler.save_experiments(experiments_df, args.output_dir)

        # Generate the visualization report on request; failures here are
        # non-fatal so the CSV result (if any) is preserved.
        if args.visualize:
            try:
                visualizer = BioParamVisualizer(experiments_df, sampler.config)
                pdf_file = visualizer.create_visualization_report(args.output_dir)
                print(f"📈 可视化报告: {pdf_file}")
            except ImportError as e:
                print("⚠️  可视化功能需要安装额外依赖: pip install matplotlib seaborn")
                print(f"   错误详情: {e}")
            except Exception as e:
                print(f"⚠️  可视化生成失败: {e}")

        print("\n✅ 任务完成!")
        if csv_file is not None:
            print(f"📁 CSV文件: {csv_file}")
        print(f"📊 实验数量: {len(experiments_df)}")
        print(f"🔧 参数维度: {len(experiments_df.columns)}")

        # BUG FIX: the report hints were previously gated on
        # len(output_files) > 1, which silently skipped them whenever
        # --no-csv was combined with --visualize.
        if pdf_file is not None:
            print("\n💡 提示: 可视化报告包含以下内容:")
            print("   • 软件选择分布统计")
            print("   • 数值参数分布直方图")
            print("   • 参数相关性热图")
            print("   • LHS抽样质量评估")
            print("   • 分类参数统计")
            print("   • 参数覆盖度分析")

    except Exception as e:
        print(f"❌ 错误: {e}")
        return 1

    return 0


if __name__ == '__main__':
    # Raise SystemExit directly: the builtin exit() is injected by the
    # `site` module and is unavailable under `python -S`.
    raise SystemExit(main())