import argparse
import bisect
import sys
import time
from multiprocessing import Process, Queue

from tqdm import tqdm

from deepsignal3.utils.process_utils import get_motif_seqs
from deepsignal3.utils.process_utils import get_refloc_of_methysite_in_motif
from deepsignal3.utils.ref_reader import get_contig2len, get_contig2len_n_seq


def read_tsv(file_path, filter_col_10=False, chrom='chr1'):
    """
    Read a methylation TSV file into a dictionary.

    Only rows whose first column equals *chrom* are kept (previously this
    was hard-coded to 'chr1'; the default preserves that behavior).
    Column 10 (1-based) is read coverage; column 11 is a percentage that
    is converted to a fraction in [0, 1].

    :param file_path: path to the TSV file
    :param filter_col_10: when True, keep only rows with coverage >= 5
    :param chrom: chromosome name to keep (default 'chr1')
    :return: dict {(chrom, pos_str): (pos_int, fraction)}
    """
    data = {}
    with open(file_path, 'r') as file:
        for line in file:
            parts = line.strip().split('\t')
            if len(parts) < 11:
                continue  # skip malformed rows
            if parts[0] != chrom:
                continue  # keep only the requested chromosome
            coverage = int(parts[9])              # column 10: read coverage
            fraction = float(parts[10]) / 100.0   # column 11: percent -> fraction
            if filter_col_10 and coverage < 5:
                continue  # drop low-coverage sites when filtering is on
            data[(parts[0], parts[1])] = (int(parts[1]), fraction)
    return data


def create_coordinate_map(reference_data):
    """
    Invert a {key: (coord, value)} mapping into {coord: (key, value)}
    so that sites can be looked up by genomic coordinate.
    """
    inverted = {}
    for key, (coord, value) in reference_data.items():
        inverted[coord] = (key, value)
    return inverted


def calculate_density_with_accumulated_ranges(center_data, reference_data, ranges, queue):
    """
    Accumulate density calculations and send results to the queue.
    """
    #coord_map = create_coordinate_map(reference_data)
    #coord_list = sorted(coord_map.keys())  # Sort coordinates for easier range lookup

    density_results = {}

    for key, (center, _) in center_data.items():
        density_results[key] = {}
        try:
            center_idx = reference_data.index(center)  # Get index of the center in the sorted list
        except ValueError:
            continue  # 如果center不在列表中，跳过该密度计算

        total_count = 0
        left_index = center_idx - 1
        right_index = center_idx + 1

        for r in ranges:
            

            # Expand left
            while left_index >= 0 and center - reference_data[left_index] <= r:
                total_count += 1
                left_index -= 1

            # Expand right
            while right_index < len(reference_data) and reference_data[right_index] - center <= r:
                total_count += 1
                right_index += 1

            density_results[key][r] = total_count / (2 * r) if r > 0 else 0

        # 将结果推送到输出队列
        queue.put((key, density_results[key]))


def output_results(queue, ranges):
    """
    Continuously listen to the queue and output results as they are available.
    """
    while True:
        result = queue.get()
        if result == 'done':
            break  # Stop if the 'done' signal is received
        key, densities = result
        print(f'{key}\t' + "\t".join(f"{densities[r]:.4f}" for r in ranges), flush=True)
        sys.stdout.flush()


def worker(queue, reference_data, ranges, result_queue):
    """
    Worker process: consume batches from *queue* and compute densities
    until the 'done' sentinel is received.

    :param queue: task queue yielding {key: (center, value)} batches
    :param reference_data: sorted list of reference coordinates
    :param ranges: increasing list of half-window sizes
    :param result_queue: queue that receives (key, densities) results
    """
    while True:
        # Queue.get() blocks until an item is available, so the former
        # `if queue.empty(): time.sleep(0.1)` poll was redundant (and
        # racy between the empty() check and the get()) — removed.
        batch_data = queue.get()

        if batch_data == 'done':
            break

        calculate_density_with_accumulated_ranges(batch_data, reference_data, ranges, result_queue)


def process_density_in_batches(data, reference_data, ranges, batch_size=20, num_workers=40):
    """
    Fan density work out to a fixed pool of worker processes that consume
    key batches from a shared task queue, while a dedicated output
    process prints results as they arrive.

    :param data: dict {key: (center, value)} of sites to process
    :param reference_data: sorted list of reference coordinates
    :param ranges: increasing list of half-window sizes
    :param batch_size: number of keys handed to a worker at a time
    :param num_workers: size of the worker process pool
    """
    all_keys = list(data.keys())

    task_queue = Queue()    # batches of {key: value} for the workers
    result_queue = Queue()  # (key, densities) tuples for the printer

    # Dedicated process that streams results to stdout.
    printer = Process(target=output_results, args=(result_queue, ranges))
    printer.start()

    # Fixed-size pool of density workers.
    pool = [
        Process(target=worker, args=(task_queue, reference_data, ranges, result_queue))
        for _ in range(num_workers)
    ]
    for proc in pool:
        proc.start()

    # Slice the keys into fixed-size batches and enqueue each batch.
    for start in range(0, len(all_keys), batch_size):
        chunk = all_keys[start:start + batch_size]
        task_queue.put({k: data[k] for k in chunk})

    # One sentinel per worker so every process shuts down cleanly.
    for _ in range(num_workers):
        task_queue.put('done')

    for proc in pool:
        proc.join()

    # All workers finished: stop the printer and wait for it.
    result_queue.put('done')
    printer.join()

def categorize_keys(file1_data, file2_data):
    """
    Cross-classify keys shared by both files by their methylation level.

    Keys whose value classifies as 'middle' in either file are not
    recorded in any bucket.

    :param file1_data: dict {key: (coord, value)} from the first file
    :param file2_data: dict {key: (coord, value)} from the second file
    :return: {'low_low': [...], 'low_high': [...], 'high_low': [...], 'high_high': [...]}
    """
    categories = {'low_low': [], 'low_high': [], 'high_low': [], 'high_high': []}
    for key in file1_data:
        if key not in file2_data:
            continue
        label1 = classify_value(file1_data[key][1])
        label2 = classify_value(file2_data[key][1])
        bucket = f'{label1}_{label2}'
        # 'middle' labels yield bucket names that are not tracked.
        if bucket in categories:
            categories[bucket].append(key)
    return categories

def classify_value(value):
    """
    Map a methylation fraction to a coarse level label.

    :param value: float, expected in [0, 1]
    :return: 'low' for [0, 0.1], 'high' for [0.9, 1], otherwise 'middle'
    """
    is_low = 0 <= value <= 0.1
    is_high = 0.9 <= value <= 1
    if is_low:
        return 'low'
    return 'high' if is_high else 'middle'

def main(file1, file2, reference_path, category_choose):
    """
    Compute CpG-site densities around chr1 methylation calls.

    :param file1: path to the first TSV file (loaded but not analysed here)
    :param file2: path to the second TSV file whose sites are analysed
    :param reference_path: path to the reference genome FASTA
    :param category_choose: category selector (currently unused)
    """
    # Locate every CpG motif site on chr1 of the reference genome.
    chrom2len, contigs = get_contig2len_n_seq(reference_path)
    motif_seqs = get_motif_seqs('CG', True)
    tsite_locs = get_refloc_of_methysite_in_motif(
        contigs['chr1'], set(motif_seqs), 0)
    ref_data = sorted(tsite_locs)

    # Load both call files, keeping only sites with coverage >= 5.
    file1_data = read_tsv(file1, filter_col_10=True)
    file2_data = read_tsv(file2, filter_col_10=True)

    # Half-window sizes: 10, 20, ..., 100.
    ranges = list(range(10, 101, 10))

    print("\nCalculating densities for file...")

    process_density_in_batches(file2_data, ref_data, ranges)


if __name__ == "__main__":
    # Command-line entry point.
    arg_parser = argparse.ArgumentParser(
        description="Calculate densities for file1 and output progressively."
    )
    arg_parser.add_argument("file1", help="Path to the first TSV file")
    arg_parser.add_argument("file2", help="第二个TSV文件路径")
    arg_parser.add_argument('-c')
    arg_parser.add_argument(
        '-ref',
        type=str,
        required=False,
        default='/public/data/hpc174601028/xiaoyf/reference/chm13v2.0.fa',
    )

    parsed = arg_parser.parse_args()

    main(parsed.file1, parsed.file2, parsed.ref, parsed.c)