import argparse
import glob
import gzip
import multiprocessing
import os
import shutil
import threading
import time
from collections import defaultdict
from copy import copy
from queue import Empty, Queue

from tqdm.auto import tqdm
# Max number of worker processes (capped at 2).
CPU_KERNEL = min(multiprocessing.cpu_count(), 2)

# Command-line interface.
parser = argparse.ArgumentParser(
    prog='gz file search tool',
    description='search given patterns from gz files, write results as plain text file',
    epilog='[Example Usage]: python gzsearch.py -i snp_list.txt -f ./*1.txt.gz -o extract_content.txt --headline')

parser.add_argument('-i', '--input_path', help="input .txt file that gives strings to be searched")
parser.add_argument('-f', '--files', action="extend", nargs="+", type=str, help="target .gz files to be searched")
parser.add_argument('-o', '--output_path', help="output .txt file that includes searched results",
                    default="extract_content.txt")  # optional
parser.add_argument('--headline', help='put this on your command if you need to add headline to output file',
                    action='store_true')
parser.add_argument('--print', help='put this on your command if print the result to console', action='store_true')
args = parser.parse_args()

# Validate the pattern-list file before reading it.
if not os.path.exists(args.input_path):
    raise Exception(f"input path not exists: {args.input_path}")

# Read one pattern per line; skip blank lines so an empty line cannot
# become a spurious (empty) pattern.
patterns = []
with open(args.input_path, "rt") as f:
    for line in f:
        _pattern = line.strip()
        if _pattern:
            patterns.append(f"{_pattern}\t")

# Fail fast when the pattern file yields nothing to search for.
if not patterns:
    raise Exception(f"no patterns exists in: {args.input_path}")

# Temp-folder name: the output path with its extension stripped.
# BUG FIX: the old `args.output_path.split(".")[0]` returned "" for
# paths such as "./out.txt"; os.path.splitext strips only the extension.
OUTPUT_PATH = os.path.splitext(args.output_path)[0]

# A single -f argument may be a glob pattern (quoted on the shell);
# expand it here. Multiple arguments are taken literally.
files = args.files
if len(args.files) == 1:
    files = glob.glob(args.files[0])
SCAN_LIST = files

# Patterns as a set (tab markers removed) for O(1) membership tests.
PATTERNS = {p.rstrip('\t') for p in patterns}

# scan one file
def scan_single_file(file):
    """Scan one gzip file and write matching lines to a per-file temp .txt.

    A line matches when its 4th whitespace-separated field (index 3,
    presumably the SNP id — TODO confirm) is in PATTERNS. Matching lines
    are written, each suffixed with a tab and the source filename, to
    OUTPUT_PATH/<basename>.txt for the later merge step.

    Returns:
        None on success, or (file, error_message) on failure so the
        caller can report skipped files.
    """
    try:
        with gzip.open(file, 'rt') as f:
            single_file_result = []
            for line in tqdm(f):
                fields = line.split()
                # Guard against short/blank lines: previously a single
                # malformed line raised IndexError and aborted the file.
                if len(fields) > 3 and fields[3] in PATTERNS:
                    single_file_result.append(line.strip() + f"\t{file}")
    except Exception as e:
        return file, str(e)

    # BUG FIX: use only the basename — joining a path like "dir/x.gz"
    # pointed into a subdirectory that does not exist under OUTPUT_PATH.
    # NOTE(review): identically-named files in different dirs now collide.
    output_path = os.path.join(OUTPUT_PATH, f"{os.path.basename(file)}.txt")

    try:
        with open(output_path, "w", encoding="utf-8") as f:
            f.writelines(item + "\n" for item in single_file_result)
    except Exception as e:
        # BUG FIX: report the write failure to the caller instead of
        # printing it and returning success (None).
        return file, str(e)

    return None

def threaded_merge_txt(input_dir='.', output_file=None):
    """Merge all per-file .txt results into the final output file.

    Files are read concurrently by a small thread pool, then written in
    the original glob order so each file's lines stay contiguous and the
    output is deterministic. The temp folder is removed afterwards.

    Args:
        input_dir: directory holding the per-file temp .txt results.
        output_file: final output path; defaults to args.output_path.
    """
    if output_file is None:
        # BUG FIX: the old default (OUTPUT_PATH) disagreed with the path
        # actually written to (args.output_path) and with the path
        # reported in the final message; keep them consistent.
        output_file = args.output_path

    txt_files = glob.glob(f'{input_dir}/*.txt')
    print(f"Success scanning {len(txt_files)} file(s)")
    print("--" * 20)
    if not txt_files:
        print("No .txt found")
        return
    print("Merge start ...")
    start_time = time.time()

    file_queue = Queue()
    file_contents = defaultdict(str)  # path -> file content

    for file_path in txt_files:
        file_queue.put(file_path)

    def worker():
        """Drain the queue, reading each file's content into file_contents."""
        while True:
            # BUG FIX: the old empty()/get_nowait() pair raced between
            # workers, and its finally-clause called task_done() even when
            # get_nowait() had raised, corrupting the queue's counter.
            try:
                file_path = file_queue.get_nowait()
            except Empty:
                break
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    file_contents[file_path] = f.read()
            except OSError as e:
                # BUG FIX: an unhandled read error used to kill the worker
                # thread and deadlock file_queue.join(); log and continue
                # (the defaultdict yields "" for the missing file).
                print(f"Error reading {file_path}: {e}")
            finally:
                file_queue.task_done()

    thread_count = min(4, len(txt_files))
    threads = []
    for _ in range(thread_count):
        t = threading.Thread(target=worker)
        t.start()
        threads.append(t)

    # Block until every queued file has been processed.
    file_queue.join()

    # Write in the original glob order to keep each file's block together.
    with open(output_file, 'w', encoding='utf-8') as out_f:
        for file_path in txt_files:
            out_f.write(file_contents[file_path])

    elapsed = time.time() - start_time
    # BUG FIX: remove the directory we actually merged from (input_dir),
    # not the module-level OUTPUT_PATH (same value for the current caller,
    # but wrong for any other input_dir).
    shutil.rmtree(input_dir)
    print(f"Merge time: {elapsed:.2f} s, result file: '{output_file}'")
    print("Merge completed!")

def main():
    """Create the temp folder, scan all files in parallel, merge results.

    Returns:
        The number of files that failed to scan.
    """
    # NOTE: the temp folder name doubles as the output-path stem;
    # changing it is not recommended.
    folder_name = OUTPUT_PATH

    print("-" * 20)
    try:
        os.mkdir(folder_name)
        print(f"Temp data folder '{folder_name}' established")
    except FileExistsError:
        pass  # reuse a leftover folder from a previous run
    print(f"Start scanning {len(SCAN_LIST)} file(s) ")

    # Process pool: one task per .gz file, sized by CPU_KERNEL.
    # The context manager guarantees the pool is torn down even on error;
    # list() drains imap so all results exist before the pool exits.
    with multiprocessing.Pool(processes=CPU_KERNEL) as pool:
        results = list(tqdm(pool.imap(scan_single_file, SCAN_LIST), total=len(SCAN_LIST)))

    # Merge all per-file results into the final output file.
    threaded_merge_txt(input_dir=folder_name)

    # Report files that failed to scan: non-None results are (file, reason).
    error_event = list(filter(None, results))
    for file_name, reason in error_event:
        # BUG FIX: typo "WANRING" -> "WARNING" in the user-facing message.
        print(f"WARNING: Skip data file {file_name} due to {reason}")
    return len(error_event)

if __name__ == "__main__":
    # Entry point: run the scan/merge pipeline and print a summary.
    start = time.time()
    failed = main()
    separator = "--" * 20
    print(separator)
    print(f"Total time: {time.time() - start:.2f} s")
    print(f"Success: {len(SCAN_LIST) - failed} file(s)")
    print(f"Fail: {failed} file(s)")
    print(separator)