import os
import gzip
import glob
import argparse
import logging
from copy import copy
from tqdm.auto import tqdm
import multiprocessing
import time

# Command-line interface definition.
parser = argparse.ArgumentParser(
    prog='gz file search tool',
    description='search given patterns from gz files, write results as plain text file',
    epilog='[Example Usage]: python gzsearch.py -i snp_list.txt -f ./*1.txt.gz -o extract_content.txt --headline')

# required=True: without it a missing -i surfaced later as a confusing
# TypeError from os.path.exists(None) instead of an argparse usage error.
parser.add_argument('-i', '--input_path', required=True,
                    help="input .txt file that gives strings to be searched")
# NOTE(review): -f is left optional for backward compatibility; action="extend"
# with nargs="+" yields a list (or None when the flag is absent).
parser.add_argument('-f', '--files', action="extend", nargs="+", type=str, help="target .gz files to be searched")
parser.add_argument('-o', '--output_path', help="output .txt file that includes searched results",
                    default="extract_content.txt")  # optional
parser.add_argument('--headline', help='put this on your command if you need to add headline to output file',
                    action='store_true')
parser.add_argument('--print', help='put this on your command if print the result to console', action='store_true')
args = parser.parse_args()

# Parse patterns from input_path: one pattern per line.  A trailing tab is
# appended to each stored pattern (stripped back off before matching, see
# PATTERNS below).
if not os.path.exists(args.input_path):
    raise Exception(f"input path not exists: {args.input_path}")

patterns = []
with open(args.input_path, "rt") as f:
    for line in f:
        _pattern = line.strip()
        # Skip blank lines — they would otherwise yield a bare "\t" pattern
        # (i.e. an empty search string) from a trailing newline in the file.
        if not _pattern:
            continue
        patterns.append(f"{_pattern}\t")

if len(patterns) < 1:
    raise Exception(f"no patterns exists in: {args.input_path}")
print(f"Found {len(patterns)} patterns to search")

# search patterns
files = args.files
# args.files is normally a list (argparse action="extend"); it can only be a
# str when this module is driven programmatically, in which case treat it as
# a glob pattern.
if isinstance(files, str):
    files = glob.glob(files)
need_head = args.headline

# Membership is tested once per input line, so keep the patterns in a set for
# O(1) lookup, with the trailing tab stripped back off for exact-field match.
PATTERNS = {p.replace('\t', '') for p in patterns}

def scan_single_file(file, patterns=None):
    """Scan one gzip-compressed text file and return the matching lines.

    A line matches when its 4th whitespace-separated column is one of the
    patterns.  Each returned entry is the stripped line with the source file
    name appended as an extra tab-separated column.

    Args:
        file: path to a .gz text file.
        patterns: optional set of strings to match against column 4; defaults
            to the module-level PATTERNS set.

    Returns:
        list[str] of matching output lines; empty on no match or on error
        (errors are logged and the file is skipped, matching the original
        best-effort contract).
    """
    if patterns is None:
        patterns = PATTERNS
    # os.path.basename is separator-agnostic (works on Windows paths too).
    file_name = os.path.basename(file)
    matches = []
    try:
        with gzip.open(file, 'rt') as f:
            # Iterate lines directly: per-worker tqdm bars interleave into
            # garbled output when 4 pool processes print at once.
            for line in f:
                fields = line.split()
                # Guard against short lines: an IndexError here previously
                # tripped the broad except and skipped the ENTIRE file.
                if len(fields) > 3 and fields[3] in patterns:
                    matches.append(line.strip() + f"\t{file_name}")
    except Exception as e:
        logging.error(f"Error detected while processing the file {file_name}, skip this file")
        logging.error(e)
    return matches

def main():
    """Scan every target file in parallel and write matches to the output path.

    Uses the module-level ``files`` (parsed from ``-f``) instead of a
    hardcoded file list, and actually persists the pooled results to
    ``args.output_path`` (the original collected ``results`` and dropped
    them).
    """
    # Create a process pool; adjust the process count to your CPU core count.
    # The context manager guarantees the pool is terminated on error.
    with multiprocessing.Pool(processes=4) as pool:
        # Process the files in parallel, one worker call per file.
        results = pool.map(scan_single_file, files)

    with open(args.output_path, "wt") as out:
        if need_head:
            # NOTE(review): the original commented-out code appended
            # "source_file_name" to a data header line whose columns are
            # unknown here; emit a minimal header — confirm with consumers.
            print("matched_line\tsource_file_name", file=out)
        for matched in results:
            # Skip empty results (no hits, or None from a legacy worker).
            if not matched:
                continue
            for entry in matched:
                print(entry, file=out)
                if args.print:
                    print(entry)


if __name__=="__main__":
    time1 = time.time()
    main()
    print(time.time()-time1)