#!/usr/bin/env python3
# 版本: 1.4.0
# 作者: 陈振玺
# 功能: IsoProfiler样本信息预处理工具（支持多进程并行加速、进度显示和智能跳过已完成样本）

import argparse
import os
import sys
import subprocess
import pandas as pd
from pathlib import Path
from collections import defaultdict
import multiprocessing as mp
from tqdm import tqdm


def validate_fastq_file(fq_path):
    """Check a gzip-compressed FASTQ file for corruption.

    Runs ``gzip -t`` on the file; a zero exit status means the archive
    decompresses cleanly.

    :param fq_path: path to the .fastq.gz file
    :return: True when the file passes the integrity test, False otherwise
    """
    try:
        proc = subprocess.run(
            ['gzip', '-t', fq_path],
            capture_output=True,
            text=True,
        )
    except Exception as e:
        # Report (e.g. missing gzip binary) on stderr and treat as invalid.
        print(f"验证文件 {fq_path} 时出错: {e}", file=sys.stderr)
        return False
    return proc.returncode == 0


def validate_sample_fastq_files(sample_data):
    """Validate every FASTQ file referenced by one sample.

    Blank / missing FqPath entries are ignored; validation stops at the
    first corrupt file.

    :param sample_data: DataFrame holding all records of a single Sample
    :return: True when all non-empty FqPath entries pass the gzip check
    """
    for _, record in sample_data.iterrows():
        path = record['FqPath']
        # Skip rows without a usable path.
        if pd.isna(path) or not path.strip():
            continue
        if not validate_fastq_file(path):
            return False
    return True


def check_sample_success_files(sample_name, sample_data, output_dir):
    """Check whether a previous pipeline run for this sample already succeeded.

    A run is considered successful when the qc.tsv result file exists for
    any (Type, FlowCell) combination of the sample, in which case the slow
    FASTQ re-validation can be skipped.

    :param sample_name: sample name
    :param sample_data: DataFrame with all records of this sample
    :param output_dir: pipeline output root directory
    :return: True if a success file (qc.tsv) exists, False otherwise
    """
    for _, row in sample_data.iterrows():
        type_name = row['Type']
        flowcell_name = row['FlowCell']

        # Path layout written by process_sample_group_worker:
        #   <out>/<Type>/<Sample>/<FlowCell>/<Sample>/<Sample>.qc.tsv
        # BUGFIX: previously this looked for "<Sample>.qc.csv", which the
        # pipeline never produces, so completed samples were re-validated
        # on every run instead of being skipped.
        qc_file_path = Path(output_dir) / type_name / sample_name / flowcell_name / sample_name / f"{sample_name}.qc.tsv"
        print(f"检查运行成功文件: {qc_file_path}")

        if qc_file_path.exists():
            print(f"发现运行成功文件，跳过FASTQ验证: {sample_name}")
            return True

    return False


def validate_sample_worker(args):
    """Pool worker: decide whether one sample's FASTQ data is usable.

    :param args: tuple of (sample_name, sample_data, output_dir)
    :return: (sample_name, is_valid)
    """
    name, data, out_dir = args

    # A success marker from an earlier pipeline run lets us skip the
    # expensive gzip integrity scan entirely.
    if out_dir and check_sample_success_files(name, data, out_dir):
        return name, True

    return name, validate_sample_fastq_files(data)


def validate_samples_parallel(df, output_dir=None, max_workers=None):
    """Validate every sample's FASTQ files with a process pool.

    :param df: DataFrame holding the full sample sheet
    :param output_dir: output root (searched for run-success markers)
    :param max_workers: worker process count; None means CPU count
    :return: set of sample names whose files are all valid
    """
    # One task per Sample group.
    tasks = [(name, group, output_dir) for name, group in df.groupby('Sample')]

    print(f"开始并行验证 {len(tasks)} 个样本的FASTQ文件...")

    workers = mp.cpu_count() if max_workers is None else max_workers

    valid_samples = set()
    with mp.Pool(processes=workers) as pool:
        # imap yields results in order so the bar advances per sample.
        with tqdm(total=len(tasks), desc="验证FASTQ文件", unit="样本") as pbar:
            for name, ok in pool.imap(validate_sample_worker, tasks):
                if ok:
                    valid_samples.add(name)
                    pbar.set_postfix({"有效": len(valid_samples)})
                pbar.update(1)

    print(f"验证完成，有效样本数: {len(valid_samples)}/{len(tasks)}")
    return valid_samples


def create_directory_structure(output_dir, type_name, sample_name, flowcell_name):
    """Create (if needed) the nested per-sample output directory.

    Layout: ``<output_dir>/<Type>/<Sample>/<FlowCell>``

    :param output_dir: output root directory
    :param type_name: Type name
    :param sample_name: Sample name
    :param flowcell_name: FlowCell name
    :return: Path of the created subdirectory
    """
    target = Path(output_dir).joinpath(type_name, sample_name, flowcell_name)
    target.mkdir(parents=True, exist_ok=True)
    return target


def create_samplelist_file(sub_dir, sample_name, fq_paths):
    """Write samplelist.tsv (``sample<TAB>fastq_path`` per line) in sub_dir.

    An already-existing file is left untouched so reruns keep their
    previous content.

    :param sub_dir: target directory (Path)
    :param sample_name: Sample name written in the first column
    :param fq_paths: iterable of FASTQ file paths
    :return: Path of the samplelist.tsv file
    """
    out_path = sub_dir / 'samplelist.tsv'

    if not out_path.exists():
        # Keep only usable paths (drop NaN and blank strings).
        rows = [
            f"{sample_name}\t{p}\n"
            for p in fq_paths
            if pd.notna(p) and p.strip() != ''
        ]
        with open(out_path, 'w') as handle:
            handle.writelines(rows)

    return out_path


def generate_singularity_command(samplelist_path, sample_name, output_subdir,
                                bind_path, sif_path):
    """Build the singularity exec command line for one sample.

    :param samplelist_path: path of the samplelist.tsv file
    :param sample_name: Sample name passed to isoprofiler via -s
    :param output_subdir: output subdirectory passed via -o
    :param bind_path: singularity --bind argument
    :param sif_path: singularity image (.sif) path
    :return: full command string
    """
    parts = [
        f"singularity exec --bind {bind_path} -e {sif_path}",
        "/opt/venv/bin/python3 /mnt/data/software/IsoProfiler/isoprofiler.py",
        f"-i {samplelist_path} -s {sample_name} -o {output_subdir}",
    ]
    return " ".join(parts)


def process_sample_group_worker(args):
    """Pool worker: prepare one (Type, FlowCell, Sample) combination.

    Creates the output directory, the samplelist.tsv and a per-sample
    ``run_analysis.sh`` wrapper script.

    :param args: ((type_name, flowcell_name, sample_name, group),
                  output_dir, bind_path, sif_path)
    :return: (command string, script path), or None when the combination
             is skipped (already completed, or no FASTQ paths)
    """
    (type_name, flowcell_name, sample_name, group), output_dir, bind_path, sif_path = args

    sub_dir = create_directory_structure(output_dir, type_name, sample_name, flowcell_name)

    # Skip combinations whose qc.tsv result already exists from a prior run.
    if (sub_dir / sample_name / f"{sample_name}.qc.tsv").exists():
        return None

    fq_paths = group['FqPath'].dropna().tolist()
    if not fq_paths:
        return None

    samplelist_path = create_samplelist_file(sub_dir, sample_name, fq_paths)

    cmd = generate_singularity_command(samplelist_path, sample_name, sub_dir,
                                       bind_path, sif_path)

    # Drop an executable one-command wrapper next to the sample's data.
    individual_script_path = sub_dir / 'run_analysis.sh'
    with open(individual_script_path, 'w') as fh:
        fh.write('#!/bin/bash\n\n')
        fh.write(cmd + '\n')
    os.chmod(individual_script_path, 0o755)

    return cmd, individual_script_path


def process_sample_groups_parallel(df_valid, output_dir, bind_path, sif_path, max_workers=None):
    """Prepare every (Type, FlowCell, Sample) combination in parallel.

    :param df_valid: DataFrame restricted to validated samples
    :param output_dir: output root directory
    :param bind_path: singularity bind path
    :param sif_path: singularity image path
    :param max_workers: worker process count; None means CPU count
    :return: (list of command strings, list of per-sample script paths)
    """
    # One task per (Type, FlowCell, Sample) group.
    task_args = [
        ((type_name, flowcell_name, sample_name, group),
         output_dir, bind_path, sif_path)
        for (type_name, flowcell_name, sample_name), group
        in df_valid.groupby(['Type', 'FlowCell', 'Sample'])
    ]

    print(f"开始并行处理 {len(task_args)} 个样本组合...")

    workers = mp.cpu_count() if max_workers is None else max_workers

    commands, script_paths = [], []
    skipped_count = 0
    with mp.Pool(processes=workers) as pool:
        with tqdm(total=len(task_args), desc="处理样本组合", unit="组合") as pbar:
            for outcome in pool.imap(process_sample_group_worker, task_args):
                if outcome is None:
                    # Already completed or no FASTQ paths for this group.
                    skipped_count += 1
                else:
                    commands.append(outcome[0])
                    script_paths.append(outcome[1])
                pbar.set_postfix({"生成": len(commands), "跳过": skipped_count})
                pbar.update(1)

    print(f"处理完成，生成命令数: {len(commands)}，跳过: {skipped_count}")
    return commands, script_paths


def main():
    """CLI entry point.

    Reads the sample sheet (CSV/XLSX), validates FASTQ files in
    parallel, prepares per-sample output directories, and writes one
    shell script aggregating every singularity command.
    """
    parser = argparse.ArgumentParser(
        description='IsoProfiler样本信息预处理工具',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
示例用法:
  iso-profiler-prep -i samples.csv -o /output/dir -s commands.sh
  iso-profiler-prep -i samples.xlsx -o /output/dir -s commands.sh --bind /custom:/custom --sif /path/to/custom.sif
  iso-profiler-prep -i samples.csv -o /output/dir -s commands.sh --threads 8
        """)

    parser.add_argument('-i', '--input', required=True,
                        help='样本信息表文件路径（CSV或XLSX格式）')
    parser.add_argument('-o', '--output', required=True,
                        help='输出结果目录')
    parser.add_argument('-s', '--script', required=True,
                        help='输出的sh命令文件路径')
    parser.add_argument('--bind', default='/mnt/data:/mnt/data',
                        help='singularity bind路径 (默认: /mnt/data:/mnt/data)')
    parser.add_argument('--sif',
                        default='/mnt/data/software/IsoProfiler/images/isoprofiler_latest.sif',
                        help='sif文件路径')
    parser.add_argument('--threads', type=int, default=None,
                        help='并行验证FASTQ文件的进程数（默认为CPU核心数）')
    args = parser.parse_args()

    # Pick the pandas reader from the file extension, then load the sheet.
    reader = pd.read_excel if args.input.endswith('.xlsx') else pd.read_csv
    try:
        df = reader(args.input)
    except Exception as e:
        print(f"读取文件失败: {e}", file=sys.stderr)
        sys.exit(1)

    # All four columns are required by the downstream grouping logic.
    missing_columns = [col for col in ('Sample', 'FlowCell', 'Type', 'FqPath')
                       if col not in df.columns]
    if missing_columns:
        print(f"缺少必需列: {missing_columns}", file=sys.stderr)
        sys.exit(1)

    Path(args.output).mkdir(parents=True, exist_ok=True)

    # Validate FASTQ integrity (samples with success markers are skipped).
    valid_samples = validate_samples_parallel(df, output_dir=args.output,
                                              max_workers=args.threads)
    df_valid = df[df['Sample'].isin(valid_samples)]

    # Build per-combination directories, samplelists and commands.
    commands, script_paths = process_sample_groups_parallel(
        df_valid, args.output, args.bind, args.sif, max_workers=args.threads)

    for script_path in script_paths:
        print(f"创建单独命令脚本: {script_path}")

    # Aggregate every command into one executable shell script.
    with open(args.script, 'w') as f:
        f.write('#!/bin/bash\n\n')
        f.writelines(cmd + '\n' for cmd in commands)
    os.chmod(args.script, 0o755)

    print(f"\n处理完成:")
    print(f"- 有效样本数: {len(valid_samples)}")
    print(f"- 生成命令数: {len(commands)}")
    print(f"- 命令文件: {args.script}")


if __name__ == '__main__':
    # Force the 'spawn' multiprocessing start method for cross-platform
    # consistency (workers re-import this module instead of forking).
    mp.set_start_method('spawn', force=True)
    main()