import concurrent.futures
import re
from collections import deque
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
from multiprocessing import Pool

import jionlp

from timer import Timer


# 去除每行前4个单词
# Compiled once at module level: four runs of non-whitespace characters,
# each followed by whitespace, anchored at the start of the line.
FIRST4_WORDS_RE = re.compile(r'^(?:\S+\s+){4}')


# Strip the first 4 whitespace-separated words from each line.
def remove_first_4_words(source_file_path, output_file_path):
    """Copy *source_file_path* to *output_file_path*, dropping the first
    four whitespace-separated tokens of every line.

    Lines with fewer than four words pass through unchanged (the anchored
    pattern simply fails to match). Errors are printed rather than raised,
    matching the other pipeline steps in this file.

    Fix: the regex is now compiled once at module level instead of being
    re-resolved from the ``re`` cache on every line of the loop.
    """
    try:
        timer = Timer()
        with open(source_file_path, 'r', encoding='utf-8') as source_file, \
            open(output_file_path, 'w', encoding='utf-8') as output_file:
            timer.start()
            for line in source_file:
                output_file.write(FIRST4_WORDS_RE.sub('', line))
        print("remove_first_4_words")
        timer.stop()
        timer.elapsed()
    except Exception as e:
        print(f'Error:{e}')

# 每个单词一行
# Compiled once at module level: any run of whitespace (spaces, tabs, newlines).
WS_RUN_RE = re.compile(r'\s+')


# Put every whitespace-separated word on its own line.
def one_word_one_line(source_file_path, output_file_path, batch_size):
    """Rewrite *source_file_path* so every whitespace-separated word is on
    its own line, reading roughly *batch_size* bytes of whole lines at a
    time so huge files are never fully loaded into memory.

    ``readlines(hint)`` always returns complete lines, so words are never
    split across batch boundaries. Errors are printed rather than raised,
    matching the other pipeline steps.

    Fix: the whitespace pattern is now compiled once at module level
    instead of being re-resolved from the ``re`` cache on every batch.
    """
    try:
        timer = Timer()
        with open(source_file_path, 'r', encoding='utf-8') as source_file, \
            open(output_file_path, 'w', encoding='utf-8') as output_file:
            timer.start()
            while True:
                lines = source_file.readlines(batch_size)
                if not lines:
                    break
                # Collapse every whitespace run in the batch to one newline.
                output_file.write(WS_RUN_RE.sub('\n', ''.join(lines)))
        print("one_word_one_line")
        timer.stop()
        timer.elapsed()
    except Exception as e:
        print(f'Error:{e}')

# 清洗文本数据 去除文本中的异常字符、冗余字符、HTML标签、括号信息、URL、E-mail、电话号码，全角字母数字转换为半角
# 繁体转简体
# Clean text data: strip abnormal/redundant characters, HTML tags,
# bracketed info, URLs, e-mails and phone numbers; normalise full-width
# alphanumerics to half-width; convert traditional Chinese to simplified.
def clean_data(source_file_path, output_file_path):
    """Clean *source_file_path* line by line into *output_file_path*.

    Each line is passed through ``jionlp.clean_text`` and then
    ``jionlp.tra2sim(..., mode='char')``. Errors are printed rather than
    raised, matching the other pipeline steps.
    """
    try:
        stopwatch = Timer()
        with open(source_file_path, 'r', encoding='utf-8') as src, \
            open(output_file_path, 'w', encoding='utf-8') as dst:
            stopwatch.start()
            for raw_line in src:
                dst.write(jionlp.tra2sim(jionlp.clean_text(raw_line), mode='char'))
        print("clean_data")
        stopwatch.stop()
        stopwatch.elapsed()
    except Exception as e:
        print(f'Error:{e}')


def process_chunk(lines):
    """Clean every line in a chunk and return the cleaned lines in order.

    Each line goes through ``jionlp.clean_text`` and then traditional-to-
    simplified conversion via ``jionlp.tra2sim(..., mode='char')``.
    """
    return [jionlp.tra2sim(jionlp.clean_text(raw), mode='char') for raw in lines]

# 多线程清理数据
# Clean data with a thread pool.
def clean_data_multithreaded(source_file_path, output_file_path, chunk_size=1000, max_workers=4):
    """Clean *source_file_path* into *output_file_path* using a thread pool.

    Bug fixed: the original submitted one chunk and immediately blocked on
    ``future.result()`` before reading the next, so chunks were processed
    strictly one at a time and the pool added overhead without any
    concurrency. A bounded sliding window of futures now keeps up to
    *max_workers* chunks in flight while results are written in submission
    order, so output line order is preserved and memory stays bounded.

    NOTE(review): threads only help here if jionlp's cleaning releases the
    GIL in native code — confirm, or prefer clean_data_multiprocess for
    CPU-bound cleaning.
    """
    def _read_chunk(file_obj):
        # Whole lines totalling roughly chunk_size bytes; [] at EOF.
        return file_obj.readlines(chunk_size)

    try:
        timer = Timer()
        with open(source_file_path, 'r', encoding='utf-8') as source_file, \
            open(output_file_path, 'w', encoding='utf-8') as output_file:
            timer.start()
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                pending = deque()  # futures in submission order
                while True:
                    lines = _read_chunk(source_file)
                    if not lines:
                        break
                    pending.append(executor.submit(process_chunk, lines))
                    # Once the window is full, drain the oldest future so at
                    # most max_workers chunks (plus results) are in memory.
                    if len(pending) >= max_workers:
                        output_file.writelines(pending.popleft().result())
                # Drain whatever is still in flight, preserving order.
                while pending:
                    output_file.writelines(pending.popleft().result())
        print("clean_data_multithreaded")
        timer.stop()
        timer.elapsed()
    except Exception as e:
        print(f'Error: {e}')

def clean_line_data(line):
    """Clean one line: strip noise with ``jionlp.clean_text``, then convert
    traditional Chinese characters to simplified (``mode='char'``)."""
    return jionlp.tra2sim(jionlp.clean_text(line), mode='char')

def process_batch(batch_lines):
    """Clean a batch of lines with ``clean_line_data``; order is preserved."""
    return list(map(clean_line_data, batch_lines))

# 多进程清理数据
# Clean data with a process pool.
def clean_data_multiprocess(source_file_path, output_file_path, batch_size=1000, num_processes=4):
    """Clean *source_file_path* into *output_file_path* using a process pool.

    Bug fixed: the original called ``pool.map(process_batch, [batch_lines])``
    with a one-element iterable, so the entire batch was handed to a single
    worker and the other ``num_processes - 1`` processes sat idle. Mapping
    ``clean_line_data`` over the individual lines — with an explicit
    ``chunksize`` to amortise inter-process messaging — spreads the work
    across all workers; ``pool.map`` returns results in input order, so the
    output line order is unchanged.
    """
    try:
        timer = Timer()
        with open(source_file_path, 'r', encoding='utf-8') as source_file, \
             open(output_file_path, 'w', encoding='utf-8') as output_file:
            timer.start()
            with Pool(processes=num_processes) as pool:
                while True:
                    # Read up to batch_size lines; islice stops cleanly at EOF.
                    batch_lines = list(islice(source_file, batch_size))
                    if not batch_lines:
                        break
                    # A few chunks per worker balances load vs IPC overhead.
                    chunk = max(1, len(batch_lines) // (num_processes * 4))
                    cleaned_batch = pool.map(clean_line_data, batch_lines, chunksize=chunk)
                    output_file.writelines(cleaned_batch)
        print("clean_data_multiprocess")
        timer.stop()
        timer.elapsed()
    except Exception as e:
        print(f'Error: {e}')

# 删除不含中文字符的单词
# Compiled once at module level: one CJK Unified Ideograph (basic block).
# NOTE(review): extension blocks (e.g. U+3400-U+4DBF) are not matched —
# this mirrors the original behaviour.
CHINESE_CHAR_RE = re.compile(r'[\u4e00-\u9fff]')


# Drop lines that contain no Chinese character.
def remove_not_chines(source_file_path, output_file_path):
    """Copy to *output_file_path* only the lines of *source_file_path* that
    contain at least one Chinese character (U+4E00-U+9FFF).

    Fix: the regex used to be compiled inside the per-line loop; it is now
    compiled once at module level.
    """
    try:
        timer = Timer()
        with open(source_file_path, 'r', encoding='utf-8') as source_file, \
            open(output_file_path, 'w', encoding='utf-8') as output_file:
            timer.start()
            for line in source_file:
                if CHINESE_CHAR_RE.search(line):
                    output_file.write(line)
        print("remove_not_chines")
        timer.stop()
        timer.elapsed()
    except Exception as e:
        print(f'Error:{e}')

# 过滤长度为1的单词(太短无法分析竞争性)
# Compiled once at module level: runs of whitespace, removed before
# measuring a line's effective length.
ANY_WS_RE = re.compile(r'\s+')


# Filter out words of length 1 (too short to analyse competitiveness).
def remove_short_words(source_file_path, output_file_path):
    """Copy to *output_file_path* only the lines of *source_file_path*
    whose content, with ALL whitespace removed, is longer than one
    character. Kept lines are written unmodified (whitespace intact).

    Fix: the whitespace pattern is compiled once at module level instead of
    being re-resolved from the ``re`` cache on every line.
    """
    try:
        timer = Timer()
        with open(source_file_path, 'r', encoding='utf-8') as source_file, \
            open(output_file_path, 'w', encoding='utf-8') as output_file:
            timer.start()
            for line in source_file:
                # Length test ignores every whitespace char, not just '\n'.
                if len(ANY_WS_RE.sub('', line)) > 1:
                    output_file.write(line)
        print("remove_short_words")
        timer.stop()
        timer.elapsed()
    except Exception as e:
        print(f'Error:{e}')


# Pipeline driver. Guarded with __main__ so that (a) multiprocessing's
# spawn start method (Windows/macOS) can safely re-import this module
# without re-running the pipeline, and (b) the cleaning functions can be
# imported as a library. Completed stages are kept commented out to
# document the processing order.
if __name__ == '__main__':
    # remove_first_4_words('data/user_tag_query.10W.TRAIN.encoded.csv', 'data/user_tag_query.10W.TRAIN.encoded.remove_first_4_words.csv')
    # one_word_one_line('data/user_tag_query.10W.TRAIN.encoded.remove_first_4_words.csv', 'data/user_tag_query.10W.TRAIN.encoded.one_word_one_line.csv', 50000)
    # clean_data('data/user_tag_query.10W.TRAIN.encoded.one_word_one_line.csv', 'data/user_tag_query.10W.TRAIN.encoded.cleaned.csv')
    # clean_data_multithreaded('data/user_tag_query.10W.TRAIN.encoded.one_word_one_line.csv', 'data/user_tag_query.10W.TRAIN.encoded.cleaned.csv', chunk_size=10000, max_workers=6)
    # remove_not_chines('data/user_tag_query.10W.TRAIN.encoded.cleaned.csv', 'data/user_tag_query.10W.TRAIN.encoded.chinese.csv')
    remove_short_words('data/user_tag_query.10W.TRAIN.encoded.chinese.csv', 'data/user_tag_query.10W.TRAIN.encoded.remove_short_words.csv')