import re
from concurrent.futures import ThreadPoolExecutor

from timer import Timer

# Search a file for lines matching any of the given keywords and copy them to an output file.
def search_keywords_in_file(source_file_path, output_file_path, keywords):
    """Write every line of *source_file_path* that contains at least one of
    *keywords* (matched literally) to *output_file_path*, timing the scan.

    Errors are caught and printed rather than raised.
    """
    try:
        # One alternation pattern; re.escape makes regex metacharacters match literally.
        pattern = re.compile('|'.join(map(re.escape, keywords)))
        timer = Timer()
        with open(source_file_path, 'r', encoding='utf-8') as src, \
            open(output_file_path, 'w', encoding='utf-8') as dst:
            timer.start()
            # Stream lazily: keep only the lines containing a keyword.
            dst.writelines(row for row in src if pattern.search(row))
        print("search_keywords_in_file")
        timer.stop()
        timer.elapsed()
    except Exception as e:
        print(f'Error:{e}')


def process_chunk(lines, keywords):
    """Return the subset of *lines* containing any of *keywords* (literal match)."""
    # Escape each keyword so regex metacharacters are matched literally.
    matcher = re.compile('|'.join(map(re.escape, keywords)))
    return [candidate for candidate in lines if matcher.search(candidate)]

def search_keywords_in_file_multithreaded(source_file_path, output_file_path, keywords, chunk_size=50000, max_workers=16):
    """Filter a file through a thread pool: lines containing any of *keywords*
    are written to *output_file_path* in their original order.

    ``chunk_size`` is the byte hint passed to ``readlines()`` — each chunk is
    roughly that many bytes of whole lines, not a line count.  Errors are
    caught and printed rather than raised.
    """
    try:
        timer = Timer()
        with open(source_file_path, 'r', encoding='utf-8') as source_file, \
            open(output_file_path, 'w', encoding='utf-8') as output_file:
            timer.start()
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                # BUG FIX: the original called future.result() immediately after
                # submit(), blocking on each chunk and making the run fully
                # serial.  Keep up to max_workers futures in flight instead;
                # draining the oldest first preserves output line order and
                # bounds the number of chunks held in memory.
                pending = []
                while True:
                    # readlines(hint) returns complete lines totalling roughly
                    # chunk_size bytes; an empty list signals end of file.
                    lines = source_file.readlines(chunk_size)
                    if not lines:
                        break
                    pending.append(executor.submit(process_chunk, lines, keywords))
                    if len(pending) >= max_workers:
                        # pop(0) is O(len(pending)) but pending is at most
                        # max_workers entries, so the cost is negligible.
                        output_file.writelines(pending.pop(0).result())
                # Drain whatever is still in flight, in submission order.
                while pending:
                    output_file.writelines(pending.pop(0).result())
        # BUG FIX: previously printed "search_keywords_in_file" (copy-paste).
        print("search_keywords_in_file_multithreaded")
        timer.stop()
        timer.elapsed()
    except Exception as e:
        print(f'Error:{e}')

if __name__ == '__main__':
    # Demo driver: run the sequential and the thread-pool variants over the
    # same dataset so their timing printouts can be compared side by side.
    SEARCH_KEYWORDS = ['购物','促销','降价','美食','运动','科技','手机','天气','数码','芯片','内存','硬盘']
    SRC_PATH = 'data/user_tag_query.10W.TRAIN.encoded.remove_short_words.csv'
    DST_PATH = 'data/search/user_tag_query.10W.TRAIN.encoded.search.csv'
    search_keywords_in_file(SRC_PATH, DST_PATH, SEARCH_KEYWORDS)
    search_keywords_in_file_multithreaded(SRC_PATH, DST_PATH, SEARCH_KEYWORDS)