import argparse
from collections import defaultdict
from tqdm import tqdm

def preprocess_tsv(input_tsv, output_tsv, chunk_size):
    """Group TSV rows by (read_id, chromosome) and write one merged row per group.

    Reads ``input_tsv`` in chunks of ``chunk_size`` lines, accumulates each
    row's position and prediction under its (read_id, chromosome) key via
    ``process_chunk``, then writes one tab-separated row per group to
    ``output_tsv`` in the form::

        read_id<TAB>chromosome<TAB>pos1,pos2,...<TAB>pred1,pred2,...

    Args:
        input_tsv: Path to the input TSV (4-column or >=10-column layout;
            see ``process_chunk`` for the two formats).
        output_tsv: Path the merged TSV is written to (overwritten).
        chunk_size: Number of lines handed to ``process_chunk`` at a time.

    Raises:
        ValueError: If the input file contains no non-blank line.
    """
    # Rows grouped by (read_id, chromosome); each group accumulates its
    # positions and predictions in input order.
    grouped = defaultdict(lambda: {"positions": [], "predictions": []})

    # Single pass to count lines (for the progress bar) and detect the column
    # count. A `with` block guarantees the handle is closed — the original
    # counted lines through an open() whose handle was never closed.
    total_lines = 0
    num_columns = None
    with open(input_tsv, "r", encoding="utf-8") as infile:
        for line in infile:
            total_lines += 1
            if num_columns is None and line.strip():
                # Column count comes from the first non-blank line. rstrip
                # (not strip) so empty trailing fields keep their tabs and
                # the column count is not under-reported.
                num_columns = len(line.rstrip("\r\n").split("\t"))
    if num_columns is None:
        raise ValueError("Input TSV is empty")

    # Second pass: read the file in chunks and accumulate into `grouped`.
    with open(input_tsv, "r", encoding="utf-8") as infile:
        chunk = []
        for line in tqdm(infile, total=total_lines, desc="Reading TSV"):
            # Strip only line endings: strip() would also eat leading/trailing
            # tabs, silently destroying empty edge fields and causing valid
            # rows to be dropped by the column-count check in process_chunk.
            chunk.append(line.rstrip("\r\n"))
            if len(chunk) >= chunk_size:
                process_chunk(chunk, grouped, num_columns)
                chunk = []
        if chunk:  # flush the final partial chunk
            process_chunk(chunk, grouped, num_columns)

    # Emit one merged row per (read_id, chromosome) group. The accumulated
    # values are already strings (they come from str.split), so a plain join
    # suffices.
    with open(output_tsv, "w", encoding="utf-8") as outfile:
        for (read_id, chromosome), data in tqdm(grouped.items(), desc="Writing TSV"):
            positions = ",".join(data["positions"])
            predictions = ",".join(data["predictions"])
            outfile.write(f"{read_id}\t{chromosome}\t{positions}\t{predictions}\n")

    print(f"Preprocessed TSV saved to {output_tsv}")

def process_chunk(chunk, grouped, num_columns):
    """Parse a chunk of raw TSV lines and accumulate them into ``grouped``.

    Blank lines and lines whose field count disagrees with ``num_columns``
    are skipped. For each valid row, its position and prediction are appended
    to the group keyed by (read_id, chromosome).

    Args:
        chunk: List of already newline-stripped TSV lines.
        grouped: defaultdict mapping (read_id, chromosome) to a dict with
            "positions" and "predictions" lists; mutated in place.
        num_columns: Expected number of tab-separated fields per row.
    """
    for row in chunk:
        if not row:  # skip blank lines
            continue
        fields = row.split("\t")
        if len(fields) != num_columns:
            # Drop malformed rows whose column count does not match.
            continue

        if num_columns >= 10:
            # Extended format: chr, ref_pos, strand, read_pos, read_name,
            # read_length, prob_0, prob_1, label, kmer
            chromosome = fields[0]
            position = fields[1]
            read_id = fields[4]
            prediction = fields[7]
        elif num_columns >= 4:
            # Legacy format: read_id, chromosome, position, prediction
            read_id, chromosome, position, prediction = fields[:4]
        else:
            # Fewer than 4 columns: nothing recognizable to record.
            continue

        entry = grouped[(read_id, chromosome)]
        entry["positions"].append(position)
        entry["predictions"].append(prediction)

def main():
    """Command-line entry point: parse arguments and run the preprocessing."""
    parser = argparse.ArgumentParser(
        description=(
            "Preprocess TSV file by grouping rows with the same "
            "read_id/read_name and chromosome."
        )
    )
    parser.add_argument(
        "--input_tsv", "-i", type=str, required=True,
        help="Input TSV file path",
    )
    parser.add_argument(
        "--output_tsv", "-o", type=str, required=True,
        help="Output TSV file path",
    )
    parser.add_argument(
        "--chunk_size", "-c", type=int, default=100000,
        help="Number of lines to process per chunk",
    )
    args = parser.parse_args()
    preprocess_tsv(args.input_tsv, args.output_tsv, args.chunk_size)

if __name__ == "__main__":
    main()