import time
import re
import jieba

def extract_valid_info(file_path, encoding='gb18030'):
    """
    Lazily yield the query fields (column index 4 onward) of each
    tab-separated line in *file_path*.

    Implemented as a generator so the file is read line by line and never
    loaded into memory as a whole.

    Bug fix: the trailing newline is stripped from each line before
    splitting. Previously the last field of every line carried a '\\n',
    which produced blank lines downstream when callers appended their own
    newline on write.

    :param file_path: path to the tab-separated input file
    :param encoding: text encoding of the file (default 'gb18030')
    :yields: one string per field, in file order
    """
    with open(file_path, 'r', encoding=encoding) as data:
        for line in data:
            # rstrip('\n') so the final field matches the others exactly.
            yield from line.rstrip('\n').split('\t')[4:]

def write_filtered_data(input_file_path, output_file_name):
    """
    Read the whole input file into memory, collect the query fields
    (column index 4 onward) of every tab-separated line, and write them
    one per line to *output_file_name* (UTF-8).

    Prints the total record count (for verification) and the elapsed time.

    Bug fix: the trailing newline is stripped from each line before
    splitting. Previously the last field of every line still carried its
    '\\n', so writing `word + '\\n'` produced a blank line after it.

    :param input_file_path: tab-separated input file (GB18030-encoded)
    :param output_file_name: destination file, one field per line (UTF-8)
    """
    start_time = time.time()
    all_loginfo = []
    with open(input_file_path, 'r', encoding='gb18030') as data:
        for line in data:
            # Strip the newline so the last field matches the others.
            fields = line.rstrip('\n').split('\t')
            all_loginfo.extend(fields[4:])

        print(f"Total records read: {len(all_loginfo)}")

    with open(output_file_name, 'w', encoding='utf-8') as file:
        # Single writelines call keeps disk I/O batched.
        file.writelines(word + '\n' for word in all_loginfo)
    print(f"Time taken for memory processing: {time.time() - start_time:.2f} seconds")

def filter_data(input_file, output_file, pattern):
    """
    Strip every substring matching *pattern* from each line of
    *input_file*, then write the surviving lines (those that are not blank
    after filtering) to *output_file*. Prints the elapsed time.

    :param input_file: UTF-8 text file to clean
    :param output_file: UTF-8 destination for the cleaned lines
    :param pattern: regular expression whose matches are removed
    """
    start_time = time.time()
    with open(input_file, 'r', encoding='utf-8') as data:
        # Apply the substitution to every line up front.
        cleaned = [re.sub(pattern, '', raw) for raw in data]

    with open(output_file, 'w', encoding='utf-8') as sink:
        for entry in cleaned:
            # Skip lines that became pure whitespace after filtering.
            if entry.strip():
                sink.write(entry)
    print(f"Time taken for filtering: {time.time() - start_time:.2f} seconds")

def segment_words(input_file, output_file):
    """
    Segment each line of *input_file* with jieba (precise mode) and write
    the tokens, one per line, to *output_file*. Prints the elapsed time.

    Bug fix: the original list comprehension called open() without a
    context manager, so the input file handle was never closed explicitly.
    Both files are now managed with `with`, and tokens are streamed
    straight to the output instead of being collected in a list first,
    keeping memory usage flat for large inputs. The written output is
    byte-identical to before.

    :param input_file: UTF-8 text file to segment
    :param output_file: UTF-8 destination, one token per line
    """
    start_time = time.time()
    with open(input_file, 'r', encoding='utf-8') as src, \
            open(output_file, 'w', encoding='utf-8') as dst:
        for line in src:
            # cut_all=False is jieba's precise mode (no overlapping tokens).
            dst.writelines(f"{word}\n" for word in jieba.cut(line.strip(), cut_all=False))
    print(f"Time taken for segmentation: {time.time() - start_time:.2f} seconds")

def load_stop_words(file_path):
    """
    Read the stop-word file (one word per line, UTF-8) and return the
    words as a set.

    A set is used so membership tests during later filtering are O(1).

    :param file_path: path to the stop-word list
    :return: set of stripped stop-word strings
    """
    with open(file_path, 'r', encoding='utf-8') as handle:
        return {entry.strip() for entry in handle}

def remove_stop_words(input_file, output_file, stop_words):
    """
    Copy tokens (one per line) from *input_file* to *output_file*,
    skipping any token found in *stop_words*. Prints the elapsed time.

    :param input_file: UTF-8 file with one segmented token per line
    :param output_file: UTF-8 destination, surviving tokens one per line
    :param stop_words: set (or other container) of tokens to drop
    """
    start_time = time.time()
    kept = []
    with open(input_file, 'r', encoding='utf-8') as src:
        for raw in src:
            # Strip once and reuse: the stripped form is both the lookup
            # key and the value written out.
            token = raw.strip()
            if token not in stop_words:
                kept.append(token)
    with open(output_file, 'w', encoding='utf-8') as dst:
        dst.writelines(f"{token}\n" for token in kept)
    print(f"Time taken for stop word removal: {time.time() - start_time:.2f} seconds")

def main():
    """
    Drive the full pipeline: extract raw log fields, filter out URLs and
    e-mail addresses, segment with jieba, and remove stop words, printing
    per-stage statistics and timings along the way.
    """
    input_file_path = './user_tag_query.10W.TRAIN'

    # Stage 1: stream records straight to disk, counting as we go.
    output_file_name_1 = 'all_logs.txt'
    count = 0
    start_time = time.time()
    with open(output_file_name_1, 'w', encoding='utf-8') as output_file:
        for record in extract_valid_info(input_file_path):
            output_file.write(record + '\n')
            count += 1
    print(f"Total records written (streaming): {count}")
    print(f"Time taken for streaming processing: {time.time() - start_time:.2f} seconds")

    # Stage 2: same extraction, but materialized fully in memory first.
    write_filtered_data(input_file_path, 'all_logs_memory.txt')

    # Stage 3: strip URLs and e-mail addresses from the streamed output.
    filtered_output_file = 're_filter.txt'
    filter_data(output_file_name_1, filtered_output_file,
                r'https?://\S+|www\.\S+|[\w.-]+@[\w.-]+')

    # Stage 4: jieba word segmentation, one token per line.
    segmented_output_file = 'jieba_words.txt'
    segment_words(filtered_output_file, segmented_output_file)

    # Stage 5: drop stop words from the segmented tokens.
    stop_words = load_stop_words("stop_words.txt")
    remove_stop_words(segmented_output_file, '../stopwords_filter.txt', stop_words)

# Entry point: run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()

# import re
# import datetime
# import jieba
# from collections import Counter
# from chardet.universaldetector import UniversalDetector
#
# detector = UniversalDetector()
# detector.reset()
#
# filepath = "./user_tag_query.10W.TRAIN"
# for each in open(filepath, 'rb'):
#     detector.feed(each)
#     if detector.done:
#         break
# detector.close()
# fileencoding = detector.result['encoding']
# confidence = detector.result['confidence']
#
# if fileencoding == "GB2312" or "GBK":
#     fileencoding = "GB18030"
# print(fileencoding)
# print(confidence)
#
#
# # 首先读取文件
# start = datetime.datetime.now()
# data = open('./user_tag_query.10W.TRAIN', 'r', encoding=fileencoding)
# all_loginfo = []
#
# for line in data:
#     # 对第一行所有内容进行制表符拆分
#     line_pre = line.split('\t')
#     valid_info = line_pre[4:]
#
#     all_loginfo.extend(valid_info)
#     # 测试一下日志中总共有多少条记录
# print(len(all_loginfo))
#
# file_name = 'all_logs.txt'
# # 打开文件以写入模式
# with open(file_name, 'w', encoding='utf-8') as file:
#     # 逐行写入列表中的数据
#     for words in all_loginfo:
#         file.write(words + '\n')
#
# # 中间写代码块
# end = datetime.datetime.now()
# print('Running time: %s Seconds' % (end - start))
#
#
# # 过滤掉无效内容
# data = open('./all_logs.txt', 'r', encoding='utf-8')
# pattern = r'https?://\S+|www\.\S+|[\w.-]+@[\w.-]+|'
# file_name = 're_filter.txt'
# fil_loginfo = []
# count = 0
# for word in data:
#     filtered_line = re.sub(pattern, '', word)
#      # 打印经过过滤的行（如果不为空）
#     if filtered_line.strip():  # 检查经过过滤后的行是否不为空
#         fil_loginfo.append(filtered_line)
# with open(file_name, 'w', encoding='utf-8') as file:
#     # 逐行写入列表中的数据
#     for words in fil_loginfo:
#         file.write(words)
#
#
# sep_list = []
# data = open('./re_filter.txt', encoding='utf-8')
# for word in data:
#     word = word.strip()
#     seg_list = jieba.cut(word, cut_all=False)
#     Seg_list = list(seg_list)
#     sep_list.extend(Seg_list)
#
# file_name = 'jieba_words.txt'
#     # 打开文件以写入模式
# with open(file_name, 'w', encoding='utf-8') as file:
#     # 逐行写入列表中的数据
#     for words in sep_list:
#         file.write(words + '\n')
#
# stop_words = "stop_words.txt"
# stop_words_dict = open(stop_words, 'r', encoding='utf-8')
# stop_words_contents = stop_words_dict.read()
# stop_words_list = stop_words_contents.splitlines()
# stop_words_dict.close()
#
# word_list = []
# data = open('./jieba_words.txt', encoding='utf-8')
# for word in data:
#     word = word.strip()
#     word_list.append(word)
#
# def remove_stop_words(word_list, stop_words):
#     word_cleaned = []
#     for word in word_list:
#         if word not in stop_words:
#             word_cleaned.append(word)
#     return word_cleaned
#
# word_list = remove_stop_words(word_list, stop_words_list)
#
# print(len(word_list))
#
# file_name = 'stopwords_filter.txt'
#     # 打开文件以写入模式
# with open(file_name, 'w', encoding='utf-8') as file:
#     # 逐行写入列表中的数据
#     for words in word_list:
#         file.write(words + '\n')
#
# # 下面统计所有关键词的出现次数
#
# word_list = []
# data = open('./stopwords_filter.txt', encoding='utf-8')
# for word in data:
#     word = word.strip()
#     word_list.append(word)
#
#
# result = []
# count_result = Counter(word_list)
# for key, val in count_result.most_common(2000):
#     key_val = "关键字：" + key + "||" + "出现次数：" + str(val)
#     print(key_val)
#     result.append(key_val)
#
# file_name = 'seeds_keyvalue.txt'
# # 打开文件以写入模式
# with open(file_name, 'w', encoding='utf-8') as file:
#     # 逐行写入列表中的数据
#     for words in result:
#         file.write(words + '\n')