import re
from collections import defaultdict

import jieba

from timer import Timer


def classify_lines_by_keywords(source_file_path, output_dir_path, keywords):
    """Split a text file into per-keyword files.

    Every line of *source_file_path* that contains a keyword is collected, and
    each keyword's matches are written to
    ``<output_dir_path>/<keyword>_classified.csv`` with all whitespace runs
    collapsed to single newlines.
    """
    try:
        # Compile one literal-match pattern per keyword up front (hoisted out
        # of the per-line loop).
        patterns = {keyword: re.compile(re.escape(keyword)) for keyword in keywords}
        # keyword -> list of raw matching lines
        matched = defaultdict(list)
        timer = Timer()
        with open(source_file_path, 'r', encoding='utf-8') as source_file:
            timer.start()
            for line in source_file:
                # A line may match several keywords; it is filed under each.
                for keyword, pattern in patterns.items():
                    if pattern.search(line):
                        matched[keyword].append(line)
        # Persist one output file per keyword that had at least one match.
        for keyword, lines in matched.items():
            # Join, then normalize every whitespace run to a single newline.
            cleaned = re.sub(r'\s+', '\n', '\n'.join(lines))
            with open(f"{output_dir_path}/{keyword}_classified.csv", 'w', encoding='utf-8') as output_file:
                output_file.write(cleaned)

        print("classify_lines_by_keywords")
        timer.stop()
        timer.elapsed()
    except Exception as e:
        print(f'Error:{e}')

# Segment the classified lines into words and clean the result.
def segment_words(source_dir_path, output_dir_path, keywords):
    """For each keyword, segment and clean its classified lines.

    Stage 1: read ``<source_dir_path>/<keyword>_classified.csv``, run jieba
    search-mode segmentation, and write one token per line to
    ``<output_dir_path>/<keyword>_segmentation.csv``.
    Stage 2: re-read that intermediate file and keep only meaningful tokens
    (dropping pure numbers and punctuation) in
    ``<output_dir_path>/<keyword>_segmentation_clean.csv``.
    """
    try:
        timer = Timer()
        timer.start()
        # Keep a line if it contains a CJK character, or contains both a
        # letter and a digit (e.g. model numbers). Compiled once — it is
        # invariant across the keyword loop.
        keep_pattern = re.compile(r'[\u4e00-\u9fff]|(?=.*[A-Za-z])(?=.*\d)')
        for keyword in keywords:
            with open(f"{source_dir_path}/{keyword}_classified.csv", 'r', encoding='utf-8') as source_file, \
                open(f"{output_dir_path}/{keyword}_segmentation.csv", 'w', encoding='utf-8') as output_file:
                text = source_file.read()
                # Search-mode segmentation, one token per line.
                text_seg = '\n'.join(jieba.cut_for_search(text))
                # Collapse any whitespace runs into single newlines.
                text_seg = re.sub(r'\s+', '\n', text_seg)
                output_file.write(text_seg)
                # NOTE: the redundant explicit close() calls were removed —
                # the with-statement already closes both files.
            # BUG FIX: the intermediate file was just written to
            # output_dir_path, so it must be read back from output_dir_path
            # (the original read from source_dir_path, which breaks whenever
            # the two directories differ).
            with open(f"{output_dir_path}/{keyword}_segmentation.csv", 'r', encoding='utf-8') as source_file, \
                open(f"{output_dir_path}/{keyword}_segmentation_clean.csv", 'w', encoding='utf-8') as output_file:
                for line in source_file:
                    # Drop tokens that are pure digits or punctuation.
                    if keep_pattern.search(line):
                        output_file.write(line)

        print("segment_words")
        timer.stop()
        timer.elapsed()

    except Exception as e:
        print(f'Error:{e}')

# Remove stop words; the stopword list comes from nltk.
def remove_stop_words(source_dir_path, output_dir_path, keywords):
    """Filter stop words out of each keyword's cleaned segmentation file.

    Reads ``<source_dir_path>/<keyword>_segmentation_clean.csv`` (one word per
    line), drops every word found in ``stopwords/chinese`` as well as the
    keyword itself, and writes the survivors (order preserved) to
    ``<output_dir_path>/<keyword>_segmentation_clean_remove_stop_words.csv``.
    """
    try:
        timer = Timer()
        timer.start()
        # PERF FIX: read the stopword file once instead of once per keyword,
        # and keep it as a set so each membership test is O(1) rather than an
        # O(len(list)) scan per word.
        with open('stopwords/chinese', 'r', encoding='utf-8') as stopwords:
            base_stopwords = set(stopwords.read().splitlines())
        for keyword in keywords:
            # The keyword itself is also removed from its own file.
            stopword_set = base_stopwords | {keyword}
            with open(f"{source_dir_path}/{keyword}_segmentation_clean.csv", 'r', encoding='utf-8') as source_file, \
                open(f"{output_dir_path}/{keyword}_segmentation_clean_remove_stop_words.csv", 'w', encoding='utf-8') as output_file:
                word_list = source_file.read().splitlines()
                kept = [word for word in word_list if word not in stopword_set]
                output_file.write('\n'.join(kept))

        print("remove_stop_words")
        timer.stop()
        timer.elapsed()

    except Exception as e:
        print(f'Error:{e}')



keywords = ['购物', '促销', '降价', '美食', '运动', '科技', '手机', '天气', '数码', '芯片', '内存', '硬盘']

if __name__ == '__main__':
    # Pipeline stages, each reading the previous stage's output from
    # data/segmentation. Earlier stages are left commented out once their
    # output files have been generated. The guard keeps the pipeline from
    # running as a side effect when this module is imported.
    # classify_lines_by_keywords('data/search/user_tag_query.10W.TRAIN.encoded.search.csv', 'data/segmentation', keywords)
    # segment_words('data/segmentation', 'data/segmentation', keywords)
    remove_stop_words('data/segmentation', 'data/segmentation', keywords)