# import argparse
# from collections import defaultdict
# from tqdm import tqdm
# from concurrent.futures import ThreadPoolExecutor
# import multiprocessing

# def read_tsv(file_path, filter_col_10=False):
#     """
#     读取TSV文件，并返回一个字典。根据是否启用过滤，决定是否仅保留第10列大于等于5的行。
#     """
#     data = {}
#     with open(file_path, 'r') as file:
#         for line in file:
#             parts = line.strip().split('\t')
#             if len(parts) < 11:
#                 continue  # 跳过行数不足的行
#             if parts[0] != 'chr1':
#                 continue  # 跳过不是chr1的行
#             key = (parts[0], parts[1])  # 使用前两列作为key
#             col_10_value = int(parts[9])  # 第10列作为过滤条件
#             col_11_value = float(parts[10]) / 100.0  # 第11列数值归一化
#             if filter_col_10:
#                 if col_10_value >= 5:
#                     data[key] = (int(parts[1]), col_11_value)  # 保存第二列坐标和第11列值
#             else:
#                 data[key] = (int(parts[1]), col_11_value)
#     return data

# def classify_value(value):
#     """
#     将第11列值分类为低、中、高三个类别。
#     """
#     if 0 <= value <= 0.3:
#         return 'low'
#     elif 0.7 <= value <= 1:
#         return 'high'
#     else:
#         return 'middle'

# def categorize_keys(file1_data, file2_data):
#     """
#     根据两个文件的数据，将key分为四类。
#     """
#     categories = {'low_low': [], 'low_high': [], 'high_low': [], 'high_high': []}
    
#     for key in file1_data:
#         if key in file2_data:
#             category1 = classify_value(file1_data[key][1])  # 分类文件1的值
#             category2 = classify_value(file2_data[key][1])  # 分类文件2的值
#             if category1 == 'low' and category2 == 'low':
#                 categories['low_low'].append(key)
#             elif category1 == 'low' and category2 == 'high':
#                 categories['low_high'].append(key)
#             elif category1 == 'high' and category2 == 'low':
#                 categories['high_low'].append(key)
#             elif category1 == 'high' and category2 == 'high':
#                 categories['high_high'].append(key)
    
#     return categories

# def filter_same_range(file1_data, file2_data, same_range=True):
#     """
#     根据文件1和文件2的值是否在同一区间，筛选符合条件的点。
#     """
#     filtered_data = {}
#     for key, (coord2, value2) in file2_data.items():
#         if key in file1_data:
#             coord1, value1 = file1_data[key]
#             # 同区间过滤
#             if (0 <= value1 <= 0.3 and 0 <= value2 <= 0.3) or (0.7 <= value1 <= 1 and 0.7 <= value2 <= 1):
#                 if same_range:
#                     filtered_data[key] = (coord2, value2)
#             else:
#                 if not same_range:
#                     filtered_data[key] = (coord2, value2)
#     return filtered_data

# # def calculate_density(filtered_data, reference_data, ranges):
# #     """
# #     使用中心点向外延伸的双指针方法来计算交集点的浓度，同时一次性计算所有范围的浓度。
# #     :param filtered_data: 交集中的点 (key, (坐标, value))
# #     :param reference_data: 文件1中的所有点 (key, (坐标, value))
# #     :param ranges: 要计算的范围列表 [10, 20, ..., 100]
# #     :return: 浓度计算结果
# #     """
# #     density_results = defaultdict(dict)
    
# #     # 将 reference_data 按第2列的值（坐标）排序
# #     sorted_ref_data = sorted(reference_data.items(), key=lambda k: k[1][0])
# #     range_limits = {r: 0 for r in ranges}  # 存储每个范围内的点数

# #     for key, (center, _) in tqdm(filtered_data.items(),total=len(filtered_data)):
# #         # 初始化每个范围内的点数
# #         range_counts = {r: 0 for r in ranges}

# #         left_index = 0
# #         right_index = 0
        
# #         # 找到初始的左右指针
# #         while right_index < len(sorted_ref_data) and sorted_ref_data[right_index][1][0] < center:
# #             right_index += 1
# #         left_index = right_index - 1

# #         # 从中心点向左延伸，计算所有range的点
# #         while left_index >= 0:
# #             ref_coord_left = sorted_ref_data[left_index][1][0]
# #             distance = center - ref_coord_left  # 左侧点距离

# #             if distance > max(ranges):  # 如果超出最大范围，停止向左扩展
# #                 break

# #             # 对所有在范围内的range进行计数
# #             for r in ranges:
# #                 if distance <= r:
# #                     range_counts[r] += 1

# #             left_index -= 1

# #         # 从中心点向右延伸，计算所有range的点
# #         while right_index < len(sorted_ref_data):
# #             ref_coord_right = sorted_ref_data[right_index][1][0]
# #             distance = ref_coord_right - center  # 右侧点距离

# #             if distance > max(ranges):  # 如果超出最大范围，停止向右扩展
# #                 break

# #             # 对所有在范围内的range进行计数
# #             for r in ranges:
# #                 if distance <= r:
# #                     range_counts[r] += 1

# #             right_index += 1

# #         # 计算并保存每个范围的浓度
# #         for r in ranges:
# #             density_results[key][r] = range_counts[r] / r if r > 0 else 0  # 浓度 = 在范围内的点数 / 范围长度

# #     return density_results

# def create_coordinate_map(reference_data):
#     """
#     创建以坐标为索引的数据结构，以便快速定位中心点。
#     :param reference_data: 文件1中的所有点 (key, (坐标, value))
#     :return: 坐标到 (key, value) 的映射字典
#     """
#     coord_map = {coord: (key, value) for key, (coord, value) in reference_data.items()}
#     return coord_map

# # def calculate_density_with_accumulated_ranges(center_data, reference_data, ranges):
# #     """
# #     直接通过坐标查找的方式累积计算交集点的浓度，同时使用多线程加速。
# #     :param center_data: 交集中的点 (key, (坐标, value))
# #     :param reference_data: 文件1中的所有点 (key, (坐标, value))
# #     :param ranges: 要计算的范围列表 [10, 20, ..., 100]
# #     :return: 直接打印计算结果
# #     """
# #     coord_map = create_coordinate_map(reference_data)
# #     coord_list = sorted(coord_map.keys())  # 按照坐标排序，方便扩展

# #     def process_center(center_key, center, ranges):
# #         density_results = {}

# #         # 找到 center 的索引
# #         center_idx = coord_list.index(center)

# #         total_count = 0
# #         left_index = center_idx - 1
# #         right_index = center_idx + 1

# #         for r in ranges:
# #             # 向左扩展查找，计算新进入范围的点
# #             while left_index >= 0 and center - coord_list[left_index] <= r:
# #                 total_count += 1
# #                 left_index -= 1

# #             # 向右扩展查找，计算新进入范围的点
# #             while right_index < len(coord_list) and coord_list[right_index] - center <= r:
# #                 total_count += 1
# #                 right_index += 1

# #             density_results[r] = total_count / (2 * r) if r > 0 else 0

# #         # 直接输出结果
# #         print(f"{center_key}\t" + "\t".join(f"{density_results[r]:.4f}" for r in ranges), flush=True)

# #     # 使用多线程来加速处理
# #     with ThreadPoolExecutor() as executor:
# #         tasks = []
# #         for key, (center, _) in tqdm(center_data.items(), desc="Processing centers"):
# #             tasks.append(executor.submit(process_center, key, center, ranges))

# #         for task in tasks:
# #             task.result()  # 确保所有任务完成
# def calculate_density_with_accumulated_ranges(filtered_data,coord_list, ranges):
#     """
#     直接通过坐标查找的方式累积计算交集点的浓度，同时使用tqdm管理进度。
#     :param filtered_data: 交集中的点 (key, (坐标, value))
#     :param reference_data: 文件1中的所有点 (key, (坐标, value))
#     :param ranges: 要计算的范围列表 [10, 20, ..., 100]
#     :return: 直接打印计算结果
#     """
    

#     # 逐行处理 filtered_data 的每个点
#     for key, (center, _) in filtered_data.items():
#         density_results = {}
#         center_idx = coord_list.index(center)
#         total_count = 0
#         left_index = center_idx - 1
#         right_index = center_idx + 1

#         for r in ranges:
#             while left_index >= 0 and center - coord_list[left_index] <= r:
#                 total_count += 1
#                 left_index -= 1
#             while right_index < len(coord_list) and coord_list[right_index] - center <= r:
#                 total_count += 1
#                 right_index += 1
#             density_results[r] = total_count / (2 * r) if r > 0 else 0

#         print(f"{key}\t" + "\t".join(f"{density_results[r]:.4f}" for r in ranges), flush=True)

# def calculate_density_for_categories_multiprocessing(categories, file2_data, coord_list, ranges):
#     """
#     使用多进程计算文件2中四个分类区域的浓度。
#     :param categories: 文件1和文件2分类结果的字典 (low_low, low_high, high_low, high_high)
#     :param file2_data: 文件2中的数据
#     :param file1_data_all: 文件1的完整数据
#     :param ranges: 要计算的范围列表 [10, 20, ..., 100]
#     """
#     # 使用 multiprocessing.Pool 并行处理
#     with multiprocessing.Pool(processes=40) as pool:
#         # 创建任务并行处理
#         results = []
#         for category, keys in categories.items():
#             results.append(
#                 pool.apply_async(calculate_density_for_category, (category, keys, file2_data, coord_list, ranges))
#             )
        
#         # 等待所有任务完成并处理结果
#         for result in tqdm(results, total=len(results), desc="Processing categories"):
#             category, count = result.get()
#             print(f"Category {category} processed {count} keys.")
# # def calculate_density_for_categories(categories, file2_data, file1_data_all, ranges):
# #     """
# #     分别计算文件2中四个分类区域的浓度。
# #     :param categories: 文件1和文件2分类结果的字典 (low_low, low_high, high_low, high_high)
# #     :param file2_data: 文件2中的数据
# #     :param file1_data_all: 文件1的完整数据
# #     :param ranges: 要计算的范围列表 [10, 20, ..., 100]
# #     """
# #     for category, keys in categories.items():
# #         print(f"\nCalculating density for category: {category}")
# #         filtered_data = {key: file2_data[key] for key in keys if key in file2_data}
# #         calculate_density_with_accumulated_ranges(filtered_data, file1_data_all, ranges)

# def calculate_density_for_category(category, keys, file2_data, coord_list, ranges):
#     """
#     计算某个分类的浓度。
#     :param category: 分类名称
#     :param keys: 该分类的key列表
#     :param file2_data: 文件2中的数据
#     :param file1_data_all: 文件1的完整数据
#     :param ranges: 要计算的范围列表 [10, 20, ..., 100]
#     """
#     print(f"\nCalculating density for category: {category}")
#     filtered_data = {key: file2_data[key] for key in keys if key in file2_data}
#     calculate_density_with_accumulated_ranges(filtered_data, coord_list, ranges)
#     return category, len(filtered_data)  # 返回每个分类的处理结果，用于结果汇总



# def main(file1, file2, file3):
#     # 读取文件1（带过滤和不带过滤的版本）
#     file1_data_filtered = read_tsv(file1, filter_col_10=True)
#     file1_data_all = read_tsv(file1, filter_col_10=False)
    
#     # 读取文件2和文件3
#     file3_data = read_tsv(file3, filter_col_10=True)
#     #file3_data = read_tsv(file3, filter_col_10=True)
#     categories = categorize_keys(file1_data_filtered, file3_data)
#     # 分类
#     #categories = categorize_keys(file1_data_filtered, file3_data)
    
#     print("\n分类结果:")
#     for category, keys in categories.items():
#         print(f'{category}: {len(keys)} keys')

#     # 筛选文件2和文件3的交集
#     #file2_filtered = filter_same_range(file1_data_filtered, file2_data, same_range=True)
#     #file3_filtered = filter_same_range(file1_data_filtered, file3_data, same_range=False)

#     #common_keys = set(file2_filtered.keys()).intersection(set(file3_filtered.keys()))
#     #common_filtered_data = {key: file2_filtered[key] for key in common_keys}
    
#     #print("\n交集的key数:", len(common_keys))

#     # 要计算的范围
#     ranges = list(range(10, 101, 10))

#     # 计算并输出浓度结果
#     #print("\n交集点的浓度计算结果:")
#     #calculate_density_with_accumulated_ranges(common_filtered_data, file1_data_all, ranges)
#     #calculate_density_for_categories(categories, file3_data, file1_data_all, ranges)
#     coord_map = create_coordinate_map(file1_data_all)
#     coord_list = sorted(coord_map.keys())  # 按照坐标排序，方便扩展
#     calculate_density_for_categories_multiprocessing(categories, file3_data, coord_list, ranges)


# if __name__ == "__main__":
#     parser = argparse.ArgumentParser(description="分类三个TSV文件中的key，筛选交集并计算浓度")
#     parser.add_argument("file1", help="第一个TSV文件路径")
#     parser.add_argument("file2", help="第二个TSV文件路径")
#     parser.add_argument("file3", help="第三个TSV文件路径")
    
#     args = parser.parse_args()
    
#     main(args.file1, args.file2, args.file3)

# import argparse
# import multiprocessing
# from multiprocessing import Queue, Process
# import sys
# import time

# def read_tsv(file_path, filter_col_10=False):
#     """
#     读取TSV文件并返回数据字典。
#     :param file_path: TSV文件路径
#     :param filter_col_10: 是否过滤第10列值 >= 5
#     :return: 数据字典 {key: (坐标, value)}
#     """
#     data = {}
#     with open(file_path, 'r') as file:
#         for line in file:
#             parts = line.strip().split('\t')
#             if len(parts) < 11:
#                 continue  # 跳过无效行
#             key = (parts[0], parts[1])  # 使用前两列作为key
#             if parts[0] != 'chr1':
#                 continue
#             col_10_value = int(parts[9])  # 第10列
#             col_11_value = float(parts[10]) / 100.0  # 第11列的值除以100
#             if filter_col_10:
#                 if col_10_value >= 5:
#                     data[key] = (int(parts[1]), col_11_value)
#             else:
#                 data[key] = (int(parts[1]), col_11_value)
#     return data

# def create_coordinate_map(reference_data):
#     """
#     创建以坐标为索引的数据结构，以便快速定位中心点。
#     :param reference_data: 文件1中的所有点 (key, (坐标, value))
#     :return: 坐标到 (key, value) 的映射字典
#     """
#     coord_map = {coord: (key, value) for key, (coord, value) in reference_data.items()}
#     return coord_map

# def process_chunk(chunk, file2_data, coord_list, ranges, result_queue):
#     """
#     处理数据块，计算浓度，并将结果放入队列。
#     :param chunk: key的部分列表 (数据块)
#     :param file2_data: 文件2中的数据
#     :param coord_list: 坐标列表
#     :param ranges: 要计算的范围列表 [10, 20, ..., 100]
#     :param result_queue: 用于存储结果的队列
#     """
#     for key in chunk:
#         center, _ = file2_data[key]
#         density_results = {}
#         try:
#             center_idx = coord_list.index(center)
#         except ValueError:
#             print(f"Key {key} not found in coord_list",flush=True)
#             continue

#         total_count = 0
#         left_index = center_idx - 1
#         right_index = center_idx + 1

#         for r in ranges:
#             while left_index >= 0 and center - coord_list[left_index] <= r:
#                 total_count += 1
#                 left_index -= 1
#             while right_index < len(coord_list) and coord_list[right_index] - center <= r:
#                 total_count += 1
#                 right_index += 1
#             density_results[r] = total_count / (2 * r) if r > 0 else 0

#         result_queue.put((key, density_results))

# def output_results(result_queue, ranges):
#     """
#     独立进程，负责输出计算结果。
#     :param result_queue: 包含计算结果的队列
#     :param ranges: 要计算的范围列表 [10, 20, ..., 100]
#     """
#     while True:
#         result = result_queue.get()
#         if result == 'DONE':
#             break
#         key, density_results = result
#         print(f"{key}\t" + "\t".join(f"{density_results[r]:.4f}" for r in ranges), flush=True)
#         sys.stdout.flush()

# def worker_process(chunk_queue, file2_data, coord_list, ranges, result_queue):
#     """
#     工作进程，从队列中获取数据块并处理。
#     :param chunk_queue: 数据块队列
#     :param file2_data: 文件2中的数据
#     :param coord_list: 坐标列表
#     :param ranges: 要计算的范围列表 [10, 20, ..., 100]
#     :param result_queue: 结果队列
#     """
#     while True:
#         if chunk_queue.empty():
#             time.sleep(0.1)
#         chunk = chunk_queue.get()
#         if chunk == 'DONE':
#             break
#         process_chunk(chunk, file2_data, coord_list, ranges, result_queue)

# def calculate_density_for_category_multiprocessing(category, keys, file2_data, coord_list, ranges, num_processes=40):
#     """
#     对某个分类使用多进程计算浓度。
#     :param category: 分类名称
#     :param keys: 该分类的key列表
#     :param file2_data: 文件2中的数据
#     :param coord_list: 坐标列表
#     :param ranges: 要计算的范围列表 [10, 20, ..., 100]
#     :param num_processes: 使用的进程数
#     """
#     print(f"\nProcessing category: {category} with {len(keys)} keys")

#     chunk_size = 20
#     chunks = [keys[i:i + chunk_size] for i in range(0, len(keys), chunk_size)]
    
#     chunk_queue = Queue()
#     result_queue = Queue()
#     processes = []

#     # 启动工作进程
#     for _ in range(num_processes):
#         p = Process(target=worker_process, args=(chunk_queue, file2_data, coord_list, ranges, result_queue))
#         p.start()
#         processes.append(p)
    
#     # 将数据块放入队列
#     for chunk in chunks:
#         chunk_queue.put(chunk)
    
#     # 发送完成信号
#     for _ in processes:
#         chunk_queue.put('DONE')
    
#     # 等待工作进程完成
#     for p in processes:
#         p.join()

#     return result_queue

# def categorize_keys(file1_data, file2_data):
#     """
#     对文件1和文件2的key进行分类。
#     :param file1_data: 文件1的数据字典
#     :param file2_data: 文件2的数据字典
#     :return: 分类字典 {'low_low': [], 'low_high': [], 'high_low': [], 'high_high': []}
#     """
#     categories = {'low_low': [], 'low_high': [], 'high_low': [], 'high_high': []}
#     for key in file1_data:
#         if key in file2_data:
#             category1 = classify_value(file1_data[key][1])
#             category2 = classify_value(file2_data[key][1])
#             if category1 == 'low' and category2 == 'low':
#                 categories['low_low'].append(key)
#             elif category1 == 'low' and category2 == 'high':
#                 categories['low_high'].append(key)
#             elif category1 == 'high' and category2 == 'low':
#                 categories['high_low'].append(key)
#             elif category1 == 'high' and category2 == 'high':
#                 categories['high_high'].append(key)
#     return categories

# def classify_value(value):
#     """
#     根据值将其分类。
#     :param value: 浮点值
#     :return: 分类 'low', 'high', or 'middle'
#     """
#     if 0 <= value <= 0.3:
#         return 'low'
#     elif 0.7 <= value <= 1:
#         return 'high'
#     else:
#         return 'middle'

# def main(file1, file2, file3):
#     file1_data_filtered = read_tsv(file1, filter_col_10=True)
#     file1_data_all = read_tsv(file1, filter_col_10=False)
#     file3_data = read_tsv(file3, filter_col_10=True)

#     categories = categorize_keys(file1_data_filtered, file3_data)
    
#     print("\n分类结果:")
#     for category, keys in categories.items():
#         print(f'{category}: {len(keys)} keys')

#     ranges = list(range(10, 101, 10))

#     print("\n文件3各个分类区域的浓度计算结果:")

#     coord_map = create_coordinate_map(file1_data_all)
#     coord_list = sorted(coord_map.keys())  # 坐标排序
#     result_queue = Queue()
#     output_process = Process(target=output_results, args=(result_queue, ranges))
#     output_process.start()
    
#     # 按分类顺序处理
#     for category, keys in categories.items():
#         result_queue = calculate_density_for_category_multiprocessing(category, keys, file3_data, coord_list, ranges)

#     result_queue.put('DONE')
#     output_process.join()

# if __name__ == "__main__":
#     parser = argparse.ArgumentParser(description="分类三个TSV文件中的key，筛选交集并计算浓度")
#     parser.add_argument("file1", help="第一个TSV文件路径")
#     parser.add_argument("file2", help="第二个TSV文件路径")
#     parser.add_argument("file3", help="第三个TSV文件路径")
    
#     args = parser.parse_args()
    
#     main(args.file1, args.file2, args.file3)







import argparse
import bisect
import sys
import time
from collections import defaultdict
from multiprocessing import Process, Queue

from tqdm import tqdm

from deepsignal3.utils.ref_reader import get_contig2len, get_contig2len_n_seq
from deepsignal3.utils.process_utils import get_refloc_of_methysite_in_motif
from deepsignal3.utils.process_utils import get_motif_seqs

def read_tsv(file_path, cov, filter_col_10=False):
    """
    Read a methylation TSV file into a per-chromosome dictionary.

    :param file_path: path to the TSV file
    :param cov: minimum coverage (column 10) required when filtering
    :param filter_col_10: if True, keep only rows whose column-10 value >= cov
    :return: nested dict {chrom: {(chrom, pos_str): (pos_int, fraction)}},
             where fraction is column 11 divided by 100
    """
    # Bug fix: the original used `defaultdict()` with no default_factory,
    # which raises KeyError on the first `data[chrom][key] = ...` assignment.
    data = defaultdict(dict)
    with open(file_path, 'r') as file:
        for line in file:
            parts = line.strip().split('\t')
            if len(parts) < 11:
                continue  # skip malformed / short rows
            key = (parts[0], parts[1])  # (chromosome, position-as-string)
            coverage = int(parts[9])  # column 10: read coverage
            fraction = float(parts[10]) / 100.0  # column 11: percent -> fraction
            if not filter_col_10 or coverage >= cov:
                data[parts[0]][key] = (int(parts[1]), fraction)
    return data


def create_coordinate_map(reference_data):
    """
    Invert {key: (coord, value)} into {coord: (key, value)} so that a point
    can be located by its coordinate in O(1).
    """
    coord_map = {}
    for entry_key, (coord, val) in reference_data.items():
        coord_map[coord] = (entry_key, val)
    return coord_map


def calculate_density_with_accumulated_ranges(center_data, reference_data, ranges, queue):
    """
    Compute point densities around each center for nested ranges and push the
    results onto the output queue.

    For every center, counts reference coordinates within distance r of the
    center (the center's own slot excluded) and reports count / (2 * r) for
    each r. Counts accumulate as the two pointers expand outward, so `ranges`
    must be sorted ascending.

    :param center_data: {key: (coordinate, value)} points to evaluate
    :param reference_data: ascending sorted list of reference coordinates
    :param ranges: ascending list of window half-widths, e.g. [10, 20, ..., 100]
    :param queue: queue receiving (key, {r: density}) tuples
    """
    for key, (center, _) in center_data.items():
        # Locate the center with binary search; the original linear
        # list.index() (plus an exception-driven center-1 retry) cost
        # O(len(reference_data)) per center. bisect_left returns the first
        # occurrence, matching list.index semantics on the sorted list.
        # The center-1 fallback tolerates off-by-one coordinates; centers
        # absent from the reference are skipped, as before.
        center_idx = None
        for candidate in (center, center - 1):
            idx = bisect.bisect_left(reference_data, candidate)
            if idx < len(reference_data) and reference_data[idx] == candidate:
                center_idx = idx
                break
        if center_idx is None:
            continue

        densities = {}
        total_count = 0
        left_index = center_idx - 1
        right_index = center_idx + 1

        for r in ranges:
            # Expand left while still within distance r of the center.
            while left_index >= 0 and center - reference_data[left_index] <= r:
                total_count += 1
                left_index -= 1
            # Expand right likewise.
            while right_index < len(reference_data) and reference_data[right_index] - center <= r:
                total_count += 1
                right_index += 1
            densities[r] = total_count / (2 * r) if r > 0 else 0

        # Stream the per-key result to the output process immediately.
        queue.put((key, densities))


def output_results(queue, ranges):
    """
    Drain the result queue and print one tab-separated row per result until
    the 'done' sentinel arrives.

    Each row is: chrom <TAB> position <TAB> density per range (4 decimals).
    """
    while True:
        item = queue.get()
        if item == 'done':
            break  # sentinel: no more results will arrive
        key, densities = item
        formatted = "\t".join(f"{densities[r]:.4f}" for r in ranges)
        print(f"{key[0]}\t{key[1]}\t{formatted}", flush=True)
        sys.stdout.flush()


def worker(queue, reference_data, ranges, result_queue):
    """
    Worker process: consume batches from the task queue until the 'done'
    sentinel, computing densities for each batch.

    :param queue: task queue holding {key: (coord, value)} batches
    :param reference_data: ascending sorted list of reference coordinates
    :param ranges: ascending list of window half-widths
    :param result_queue: queue receiving (key, {r: density}) tuples
    """
    while True:
        # Queue.get() blocks until an item is available; the original
        # `if queue.empty(): time.sleep(0.1)` poll was redundant and racy
        # (multiprocessing.Queue.empty() is documented as unreliable).
        batch_data = queue.get()
        if batch_data == 'done':
            break  # sentinel: shut this worker down
        calculate_density_with_accumulated_ranges(batch_data, reference_data, ranges, result_queue)


def process_density_in_batches(data, reference_data, ranges, batch_size=20, num_workers=40):
    """
    Fan density computation out over a fixed pool of worker processes.

    A dedicated printer process drains the result queue while the workers
    consume batches of keys; 'done' sentinels shut everything down in order.

    :param data: {key: (coord, value)} points to evaluate
    :param reference_data: ascending sorted list of reference coordinates
    :param ranges: ascending list of window half-widths
    :param batch_size: number of keys per work unit
    :param num_workers: number of worker processes
    """
    task_queue = Queue()
    result_queue = Queue()

    # Printer must be running before results start arriving.
    printer = Process(target=output_results, args=(result_queue, ranges))
    printer.start()

    pool = [
        Process(target=worker, args=(task_queue, reference_data, ranges, result_queue))
        for _ in range(num_workers)
    ]
    for proc in pool:
        proc.start()

    # Feed the pool in key batches.
    all_keys = list(data.keys())
    for start in range(0, len(all_keys), batch_size):
        subset = all_keys[start:start + batch_size]
        task_queue.put({k: data[k] for k in subset})

    # One sentinel per worker, then wait for them all.
    for _ in pool:
        task_queue.put('done')
    for proc in pool:
        proc.join()

    # Workers are done, so the printer can drain and exit.
    result_queue.put('done')
    printer.join()


def main(args):
    """
    Load coverage-filtered methylation calls, enumerate CG motif sites per
    contig from the reference, and compute densities contig by contig.

    :param args: parsed CLI namespace with `file1`, `cov`, and optionally `ref`
    """
    # Methylation calls filtered to positions with coverage >= args.cov.
    file1_data = read_tsv(args.file1, args.cov, filter_col_10=True)

    # Reference FASTA was hard-coded; allow overriding via an optional `ref`
    # attribute while keeping the old default for existing callers.
    reference_path = getattr(args, 'ref', None) or '/public/data/hpc174601028/xiaoyf/reference/chm13v2.0.fa'
    chrom2len, contigs = get_contig2len_n_seq(reference_path)

    # All CG-motif site coordinates per contig, sorted for binary search.
    motif_seqs = get_motif_seqs('CG', True)
    ref_data = {}
    for contig in contigs:
        tsite_locs = get_refloc_of_methysite_in_motif(contigs[contig], set(motif_seqs), 0)
        ref_data[contig] = sorted(tsite_locs)

    ranges = list(range(10, 101, 10))  # density windows: 10, 20, ..., 100

    for contig in ref_data:
        # Skip contigs with no TSV data instead of launching a worker pool
        # for an empty dict (also avoids materializing defaultdict entries).
        if contig in file1_data:
            process_density_in_batches(file1_data[contig], ref_data[contig], ranges)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Calculate densities for file1 and output progressively.")
    parser.add_argument("file1", help="Path to the first TSV file")
    parser.add_argument('--cov',default=5,required=False,type=int)

    args = parser.parse_args()

    main(args)

