# preprocess.py
from chardet.universaldetector import UniversalDetector
import jieba
import re
from collections import Counter
import time


# Data acquisition: obtain the raw source data.
def detect_encoding(file_path):
    """
    Detect a file's character encoding so it can be read correctly.

    Parameters:
    file_path (str): Path of the file to inspect.

    Returns:
    tuple: (encoding (str), confidence (float)) as reported by chardet.
    """
    # BUG FIX: the original shadowed `file_path` with a hard-coded
    # "./user_tag_query.10W.TRAIN", so the argument was silently ignored.
    detector = UniversalDetector()
    detector.reset()
    # Feed the detector line by line; stop early once it is confident.
    with open(file_path, 'rb') as f:
        for chunk in f:
            detector.feed(chunk)
            if detector.done:
                break
    detector.close()
    return detector.result['encoding'], detector.result['confidence']


def preprocess_data(input_file, output_file, encoding='utf-8'):
    """
    Preprocess: read the raw log, keep only the query fields (tab-separated
    column 5 onward of each record) and drop records with no query fields.

    Parameters:
    input_file (str): Input file path.
    output_file (str): Output file path (written as UTF-8, one query per line).
    encoding (str): Input file encoding, default utf-8.
    """
    # errors='ignore' means decoding can never raise UnicodeDecodeError,
    # so the original try/except was dead code and is removed.
    with open(input_file, 'r', encoding=encoding, errors='ignore') as fin, \
            open(output_file, 'w', encoding='utf-8') as fout:
        for line in fin:
            queries = line.strip().split('\t')[4:]
            # Skip records with fewer than 5 fields: the original wrote a
            # bare blank line for them, polluting downstream word counts.
            if queries:
                fout.write('\n'.join(queries) + '\n')


# Keyword selection: pick the 10-20 seed keywords we need.
def select_keywords(keyword_counts, num_keywords=20):
    """
    Pick the N most frequent words to serve as seed keywords.

    Parameters:
    keyword_counts (Counter): Word -> occurrence-count statistics.
    num_keywords (int): How many seed keywords to select (default 20).

    Returns:
    list: The selected seed keywords, most frequent first.
    """
    top_entries = keyword_counts.most_common(num_keywords)
    return [keyword for keyword, _count in top_entries]


# Data processing: extract search records related to the seed keywords from the raw data.
def filter_data(input_file, output_file, seed_keywords):
    """
    Search: keep only lines containing at least one seed keyword, stripping
    URLs and e-mail addresses from the kept lines.

    Parameters:
    input_file (str): Input file path.
    output_file (str): Output file path.
    seed_keywords (list): Seed keyword list.
    """
    # BUG FIX: the original pattern ended with a stray '|', adding an empty
    # alternative that matched at every position. Compile once, outside the loop.
    noise = re.compile(r'https?://\S+|www\.\S+|[\w.-]+@[\w.-]+')
    with open(input_file, 'r', encoding='utf-8') as fin, \
            open(output_file, 'w', encoding='utf-8') as fout:
        for line in fin:
            if any(keyword in line for keyword in seed_keywords):
                cleaned = noise.sub('', line).strip()
                # Drop lines that become empty after noise removal.
                if cleaned:
                    fout.write(cleaned + '\n')


def segment_data(input_file, output_file):
    """
    Tokenize: segment each line of the filtered data with jieba (precise
    mode) and write one token per output line.

    Parameters:
    input_file (str): Input file path.
    output_file (str): Output file path.
    """
    tokens = []
    with open(input_file, 'r', encoding='utf-8') as reader:
        for raw_line in reader:
            tokens.extend(jieba.cut(raw_line.strip(), cut_all=False))
    with open(output_file, 'w', encoding='utf-8') as writer:
        writer.writelines(token + '\n' for token in tokens)


def remove_stopwords(input_file, output_file, stop_words_file):
    """
    Stop-word filtering: drop stop words (and blank tokens) from the
    one-token-per-line input.

    Parameters:
    input_file (str): Input file path (one token per line).
    output_file (str): Output file path (one kept token per line).
    stop_words_file (str): Stop-word list, one word per line.
    """
    with open(stop_words_file, 'r', encoding='utf-8') as f:
        stop_words = set(f.read().splitlines())
    # BUG FIX: the original iterated raw lines (which keep their trailing
    # '\n') and then wrote word + '\n', emitting a blank line after every
    # token. Strip each token before comparing and writing.
    with open(input_file, 'r', encoding='utf-8') as f:
        kept = [token for token in (line.strip() for line in f)
                if token and token not in stop_words]
    with open(output_file, 'w', encoding='utf-8') as f:
        f.writelines(token + '\n' for token in kept)


def count_keywords(input_file):
    """
    Count keyword occurrences in a one-token-per-line file.

    Parameters:
    input_file (str): Input file path.

    Returns:
    Counter: Occurrence count per keyword.
    """
    with open(input_file, 'r', encoding='utf-8') as f:
        return Counter(line.strip() for line in f)


# Data persistence: save the extracted seed-keyword-related search records to the target file or database.
def save_data(output_file, data):
    """
    Persist the extracted data, one entry per line.

    Parameters:
    output_file (str): Output file path.
    data (list): Entries to save.
    """
    with open(output_file, 'w', encoding='utf-8') as sink:
        sink.writelines(f"{entry}\n" for entry in data)


def main():
    """Run the full preprocessing pipeline and report its wall-clock time."""
    start_time = time.time()  # Record program start time.

    input_file = './user_tag_query.10W.TRAIN'
    encoding, confidence = detect_encoding(input_file)
    print(f'Detected encoding: {encoding} with confidence {confidence}')
    preprocess_data(input_file, 'all_logs.txt', encoding)
    keyword_counts = count_keywords('all_logs.txt')

    seed_keywords = select_keywords(keyword_counts, num_keywords=20)
    print('Selected seed keywords:', seed_keywords)

    # Print the corpus frequency of each seed keyword.
    print("\nKeyword Frequencies:")
    for keyword in seed_keywords:
        count = keyword_counts.get(keyword, 0)
        print(f"{keyword}: {count}")

    filter_data('all_logs.txt', 'filtered_data.txt', seed_keywords)
    segment_data('filtered_data.txt', 'segmented_data.txt')
    remove_stopwords('segmented_data.txt', 'cleaned_data.txt', 'stop_words.txt')
    # BUG FIX: the original left cleaned_data.txt open (no `with`) and fed
    # readlines() output — lines still carrying '\n' — to save_data, which
    # appends its own '\n' and thus wrote a blank line after every token.
    with open('cleaned_data.txt', 'r', encoding='utf-8') as f:
        cleaned = [line.rstrip('\n') for line in f]
    save_data('final_data.txt', cleaned)

    end_time = time.time()  # Record program end time.
    total_time = end_time - start_time  # Total elapsed wall-clock time.
    print(f"\n程序总运行时间：{total_time:.2f}秒")


# Script entry point: run the pipeline only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()