from chardet.universaldetector import UniversalDetector
import pkuseg
import re
from collections import Counter
import time
from concurrent.futures import ThreadPoolExecutor

# 数据获取：获取原数据
def detect_encoding(file_path):
    """Guess the character encoding of *file_path*.

    Feeds the raw bytes to chardet's UniversalDetector line by line and
    stops early once the detector reports it is confident.

    Returns:
        A ``(encoding, confidence)`` tuple taken from the detector result.
    """
    detector = UniversalDetector()
    detector.reset()
    with open(file_path, 'rb') as raw:
        for chunk in raw:
            detector.feed(chunk)
            if detector.done:
                # Detector is confident enough; no need to scan the rest.
                break
    detector.close()
    outcome = detector.result
    return outcome['encoding'], outcome['confidence']

def preprocess_data(input_file, output_file, encoding='utf-8'):
    """Extract the query columns (tab fields 5 and up) from each log line.

    Reads *input_file* with the given *encoding* (undecodable bytes are
    ignored) and writes one query string per line to *output_file* (UTF-8).

    Fix: records with fewer than five tab-separated fields slice to an
    empty list; the original wrote a bare newline for those, polluting the
    output with blank lines.  They are now skipped.
    """
    try:
        with open(input_file, 'r', encoding=encoding, errors='ignore') as src, \
                open(output_file, 'w', encoding='utf-8') as dst:
            for line in src:
                fields = line.strip().split('\t')[4:]
                if fields:  # skip records without query columns
                    dst.write('\n'.join(fields) + '\n')
    # NOTE(review): errors='ignore' above suppresses decode failures, so
    # this handler is effectively unreachable; kept for caller compatibility.
    except UnicodeDecodeError as e:
        print(f"编码错误：{e}")

# 关键字选定：确定自己所需要的 10-20 个种子关键字。
def select_keywords(keyword_counts, num_keywords=20):
    """Return the *num_keywords* most frequent words as seed keywords."""
    top_entries = keyword_counts.most_common(num_keywords)
    return [entry[0] for entry in top_entries]

# 数据处理：从原数据中提取出与种子关键字相关的搜索信息。
def filter_data(input_file, output_file, seed_keywords):
    """Keep only lines mentioning a seed keyword, with URLs/e-mails stripped.

    A line is kept when any keyword in *seed_keywords* occurs in it.  URLs,
    www-addresses and e-mail addresses are removed from kept lines; lines
    that become empty after stripping are dropped.

    Fixes: the original regex ended in a stray '|' (an empty alternation
    matching at every character position, forcing needless zero-width
    substitutions), and ``process_line`` was invoked TWICE per line in the
    filtering comprehension, doubling the regex work.
    """
    # Compiled once; trailing '|' from the original pattern removed.
    noise = re.compile(r'https?://\S+|www\.\S+|[\w.-]+@[\w.-]+')

    def process_line(line):
        # Returns the cleaned line, or None when it should be discarded.
        # The sub() result does not depend on which keyword matched, so a
        # single any() check is equivalent to the original keyword loop.
        if any(keyword in line for keyword in seed_keywords):
            cleaned = noise.sub('', line).strip()
            if cleaned:
                return cleaned
        return None

    with open(input_file, 'r', encoding='utf-8') as f:
        # Evaluate process_line exactly once per input line.
        candidates = (process_line(line) for line in f)
        filtered_data = [item for item in candidates if item is not None]
    with open(output_file, 'w', encoding='utf-8') as f:
        for words in filtered_data:
            f.write(words + '\n')

# 分词：使用 pkuseg 对过滤后的搜索信息进行分词。
def segment_data(input_file, output_file):
    """Tokenize each line of *input_file* and write one token per line."""
    # Initialize the pkuseg tokenizer with its default pre-trained model.
    tokenizer = pkuseg.pkuseg()
    tokens = []
    with open(input_file, 'r', encoding='utf-8') as src:
        for raw_line in src:
            # Segment the stripped line and collect all resulting tokens.
            tokens.extend(tokenizer.cut(raw_line.strip()))
    with open(output_file, 'w', encoding='utf-8') as dst:
        dst.writelines(token + '\n' for token in tokens)

def remove_stopwords(input_file, output_file, stop_words_file):
    """Drop stop words from a one-token-per-line file.

    Reads the stop-word list from *stop_words_file*, then copies every
    token of *input_file* that is not a stop word to *output_file*, one
    token per line.

    Fix: the original kept each token's trailing newline when reading
    (``for word in f``) and appended another on write, producing a blank
    line after every kept token.  Tokens are now stripped, and empty
    tokens are dropped.
    """
    with open(stop_words_file, 'r', encoding='utf-8') as f:
        stop_words = set(f.read().splitlines())
    with open(input_file, 'r', encoding='utf-8') as f:
        kept = [word for word in (line.strip() for line in f)
                if word and word not in stop_words]
    with open(output_file, 'w', encoding='utf-8') as f:
        for word in kept:
            f.write(word + '\n')

def count_keywords(input_file):
    """Return a Counter of the stripped lines (words) in *input_file*."""
    with open(input_file, 'r', encoding='utf-8') as f:
        return Counter(line.strip() for line in f)

# 数据保存：将提取出的与种子关键字相关的搜索信息保存到指定的文件或数据库。
def save_data(output_file, data):
    """Write each item of *data* to *output_file*, one item per line."""
    with open(output_file, 'w', encoding='utf-8') as sink:
        sink.writelines(item + '\n' for item in data)

# 主函数
def main():
    """Run the full log-processing pipeline once and return its wall time.

    Pipeline: detect encoding -> extract queries -> count/select seed
    keywords -> filter -> segment -> remove stop words -> save.

    Fixes: the original submitted the filter/segment/stop-word stages to a
    ThreadPoolExecutor concurrently even though each stage reads the file
    the previous stage writes — a race where e.g. segment_data could open
    a still-empty filtered_data.txt.  It also leaked a file handle and
    passed raw readlines() (tokens keeping their '\\n') to save_data,
    doubling newlines in the final output.
    """
    start_time = time.time()

    input_file = './user_tag_query.10W.TRAIN'
    encoding, confidence = detect_encoding(input_file)
    print(f'Detected encoding: {encoding} with confidence {confidence}')
    preprocess_data(input_file, 'all_logs.txt', encoding)
    keyword_counts = count_keywords('all_logs.txt')
    seed_keywords = select_keywords(keyword_counts, num_keywords=20)
    print('Selected seed keywords:', seed_keywords)

    # Each stage consumes the previous stage's output file, so they must
    # run strictly in order; concurrency offers no benefit here.
    filter_data('all_logs.txt', 'filtered_data.txt', seed_keywords)
    segment_data('filtered_data.txt', 'segmented_data.txt')
    remove_stopwords('segmented_data.txt', 'cleaned_data.txt', 'stop_words.txt')

    # Close the file deterministically and strip newlines so save_data
    # does not double them.
    with open('cleaned_data.txt', 'r', encoding='utf-8') as f:
        save_data('final_data.txt', f.read().splitlines())

    end_time = time.time()
    total_time = end_time - start_time
    print(f"程序总运行时间：{total_time:.2f}秒")
    return total_time

# 运行多次并计算平均时间
def run_multiple_times(num_runs=5):
    """Execute main() *num_runs* times and print the mean wall time."""
    durations = [main() for _ in range(num_runs)]
    average_time = sum(durations) / num_runs
    print(f"平均运行时间：{average_time:.2f}秒")

# Script entry point: benchmark the whole pipeline over several runs.
if __name__ == '__main__':
    run_multiple_times()