import functools
import multiprocessing

import fasttext
import jieba.posseg as pseg
import thulac
from tqdm import tqdm

from config import conf
from f_config import f_conf
from utils import stopwords, ensure_pdir_exist

# Module-level THULAC segmenter, loaded once at import time (model load is
# slow). T2S=True converts traditional to simplified Chinese; filt=True
# drops meaningless words per THULAC's built-in filter.
thu = thulac.thulac(T2S=True, filt=True)


def do_cut(text, char=False, pos_filter=False, use_thulac=False):
    """Segment *text* and optionally filter tokens by part of speech.

    Args:
        text: raw input string; leading/trailing whitespace is stripped.
        char: if True, split into individual characters. NOTE: no stopword
            or POS filtering is applied on this path.
        pos_filter: if True, keep only words whose POS tag is in the
            configured allow-list (f_conf.thulac_allow_pos /
            f_conf.jieba_allow_pos depending on the segmenter).
        use_thulac: segment with THULAC instead of jieba.

    Returns:
        Space-joined tokens suitable for fastText input.
    """
    text = text.strip()
    if char:
        return ' '.join(text)

    # Build (word, pos) pairs from the chosen segmenter.
    pairs = []
    if use_thulac:
        # THULAC text mode emits space-separated "word_tag" tokens.
        # rsplit('_', 1) takes the LAST underscore as the separator, so
        # words that themselves contain '_' are kept intact (plain
        # split('_') would truncate them at the first underscore).
        for word_pos in thu.cut(text, text=True).split(' '):
            parts = word_pos.rsplit('_', 1)
            if len(parts) == 2:
                pairs.append((parts[0], parts[1]))
    else:
        pairs = [(word, flag) for word, flag in pseg.cut(text)]

    allow_pos = f_conf.thulac_allow_pos if use_thulac else f_conf.jieba_allow_pos
    # Drop stopwords always; drop disallowed POS only when pos_filter is on.
    result = [
        word for word, pos in pairs
        if word not in stopwords and (not pos_filter or pos in allow_pos)
    ]
    return ' '.join(result)


def process_chunk(chunk, kwargs_dict):
    """Convert one chunk of raw "label<TAB>text" lines to fastText format.

    Args:
        chunk: list of input lines, each "label<TAB>text".
        kwargs_dict: keyword arguments forwarded to do_cut().

    Returns:
        List of lines formatted as "__label__<label> <segmented text>".
    """
    processed = []
    for i, line in enumerate(chunk):
        if (i + 1) % 10 == 0:
            print(f'正在处理第{i + 1}行数据')
        # maxsplit=1: only the first tab separates label from text, so a
        # text field containing literal tabs no longer raises ValueError.
        label, text = line.strip().split('\t', 1)
        cut = do_cut(text, **kwargs_dict)
        processed.append(f'__label__{label} {cut}')
    return processed


def preprocess_mul(path, target_path, n_processes=10, **kwargs):
    """Parallel version of preprocess(): segment a labelled corpus with a
    process pool and write fastText-format lines to *target_path*.

    Args:
        path: input file of "label<TAB>text" lines.
        target_path: output file; parent directory is created if missing.
        n_processes: number of worker processes (and chunks).
        **kwargs: forwarded to do_cut() via process_chunk().
    """
    # Read the whole corpus into memory before splitting into chunks.
    with open(path, 'r', encoding='utf-8') as f:
        lines = [line.strip() for line in f]

    total_lines = len(lines)
    chunk_size = total_lines // n_processes
    remainder = total_lines % n_processes
    chunks = [lines[i * chunk_size: (i + 1) * chunk_size] for i in range(n_processes)]
    if remainder != 0:
        # Leftover lines (total not divisible by n_processes) go to the
        # last chunk so no data is dropped.
        chunks[-1] += lines[-remainder:]

    # pool.imap (unlike starmap, which blocks until EVERY chunk is done)
    # yields results one chunk at a time in submission order, so the
    # progress bar actually advances incrementally.
    worker = functools.partial(process_chunk, kwargs_dict=kwargs)
    with multiprocessing.Pool(processes=n_processes) as pool:
        results = []
        with tqdm(total=len(chunks), desc='并行处理进度') as pbar:
            for result in pool.imap(worker, chunks):
                results.append(result)
                pbar.update(1)

    # Flatten per-chunk results, preserving input order.
    preprocessed_data = []
    for result in results:
        preprocessed_data.extend(result)

    print(f'处理后数据:\n{preprocessed_data[:5]}')
    ensure_pdir_exist(target_path)
    with open(target_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(preprocessed_data))
    print(f'处理数据完成，保存到{target_path}')


def _test_thulac():
    """Smoke-test THULAC segmentation on a short sample sentence."""
    sample = '我爱北京天安门'
    segmenter = thulac.thulac()
    print(segmenter.cut(sample, text=True))


def _test_preproc_mul():
    """Run the parallel preprocessor on the validation set using THULAC."""
    target = f_conf.compare_data_dir + 'thulac_val.txt'
    preprocess_mul(conf.val_path, target, n_processes=10, use_thulac=True)


def preprocess(path, target_path, **kwargs):
    """Segment a labelled corpus into fastText training format (serial).

    Reads "label<TAB>text" lines from *path*, segments each text with
    do_cut(**kwargs) and writes "__label__<label> <tokens>" lines to
    *target_path*, creating the parent directory if needed.
    """
    preprocessed_data = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in tqdm(f, desc=f'开始处理{path}...'):
            # maxsplit=1: only the first tab separates label from text, so
            # text containing literal tabs no longer raises ValueError.
            label, text = line.strip().split('\t', 1)
            cut = do_cut(text, **kwargs)
            preprocessed_data.append(f'__label__{label} {cut}')
    print(f'处理后数据:\n{preprocessed_data[:5]}')
    ensure_pdir_exist(target_path)
    with open(target_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(preprocessed_data))
    print(f'处理数据完成，保存到{target_path}')


def train_with_fasttext(
        input_path,
        model_path,
        test_path,
        autotune=False,
        val_path=None,
        autotune_duration=None,
        **kwargs
):
    """Train a fastText supervised classifier, save it, and report metrics.

    Args:
        input_path: preprocessed training file in fastText format.
        model_path: where to save the trained model (.bin).
        test_path: preprocessed test file for evaluation.
        autotune: if True, run fastText hyperparameter autotuning.
        val_path: validation file for autotuning (required when autotune).
        autotune_duration: autotune time budget in seconds.
        **kwargs: extra arguments for fasttext.train_supervised().
    """
    if autotune:
        model = fasttext.train_supervised(
            input_path,
            autotuneValidationFile=val_path,
            autotuneDuration=autotune_duration,
            **kwargs
        )
    else:
        model = fasttext.train_supervised(input_path, **kwargs)
    ensure_pdir_exist(model_path)
    model.save_model(model_path)
    # fastText's model.test() returns (sample count, precision@1, recall@1)
    # -- the previous "acc"/"prec" labels were misleading.
    num_samples, precision, recall = model.test(test_path)
    print(f'测试结果 num_samples={num_samples} precision={precision} recall={recall}')


def _test_optimize():
    """Compare preprocessing strategies by training a model per variant.

    Observed results (10000 test samples each):
      none   (jieba tokens, no filtering): 0.9305
      char   (character split):            0.9271  -> worse
      filter (jieba + POS filtering):      0.9509  -> better
      thulac (THULAC tokens):              0.9469  -> better
    Run order and file paths match the original experiments.
    """
    cases = [
        ('none', preprocess, {}),
        ('char', preprocess, {'char': True}),
        ('filter', preprocess, {'pos_filter': True}),
        ('thulac', preprocess_mul, {'use_thulac': True}),
    ]
    for tag, preproc, cut_kwargs in cases:
        train_file = f_conf.compare_data_dir + tag + '_train.txt'
        test_file = f_conf.compare_data_dir + tag + '_test.txt'
        model_file = f_conf.compare_data_dir + tag + '_model.bin'
        preproc(conf.train_path, train_file, **cut_kwargs)
        preproc(conf.test_path, test_file, **cut_kwargs)
        train_with_fasttext(train_file, model_file, test_file)
    pass


# Conclusion from _test_optimize(): keep token-level (not character)
# segmentation, use THULAC, and enable POS filtering.
best_kwargs = {
    'char': False,
    'pos_filter': True,
    'use_thulac': True
}


def do_train():
    # One-off preprocessing with the best settings; already run once, so it
    # is left commented out -- re-enable if the raw data changes.
    # preprocess_mul(
    #     conf.train_path,
    #     f_conf.processed_train_path,
    #     **best_kwargs
    # )
    # preprocess_mul(
    #     conf.val_path,
    #     f_conf.processed_val_path,
    #     **best_kwargs
    # )
    # preprocess_mul(
    #     conf.test_path,
    #     f_conf.processed_test_path,
    #     **best_kwargs
    # )
    """Train the final classifier with fastText autotuning (20-minute budget).

    fastText train_supervised defaults, kept here for reference:

unsupervised_default = {
    'model': "skipgram",
    'lr': 0.05,
    'dim': 100,
    'ws': 5,
    'epoch': 5,
    'minCount': 5,
    'minCountLabel': 0,
    'minn': 3,
    'maxn': 6,
    'neg': 5,
    'wordNgrams': 1,
    'loss': "ns",
    'bucket': 2000000,
    'thread': multiprocessing.cpu_count() - 1,
    'lrUpdateRate': 100,
    't': 1e-4,
    'label': "__label__",
    'verbose': 2,
    'pretrainedVectors': "",
    'seed': 0,
    'autotuneValidationFile': "",
    'autotuneMetric': "f1",
    'autotunePredictions': 1,
    'autotuneDuration': 60 * 5,  # 5 minutes
    'autotuneModelSize': ""
}

    """
    train_with_fasttext(
        f_conf.processed_train_path,
        f_conf.model_path,
        f_conf.processed_test_path,
        autotune=True,
        val_path=f_conf.processed_val_path,
        autotune_duration=20 * 60,
    )
    # Observed result: num_samples=10000 acc=0.9554 prec=0.9554
    pass


# Lazily-loaded fastText model singleton; always access via get_model().
_model = None


def get_model():
    """Return the cached fastText model, loading it on first use."""
    global _model
    if _model is not None:
        return _model
    _model = fasttext.load_model(f_conf.model_path)
    return _model


def predict(text):
    """Predict the class label of a raw text.

    Segments *text* with the best-performing do_cut() settings, runs the
    cached model, and returns the top predicted label with fastText's
    '__label__' prefix removed.
    """
    label_prefix = '__label__'
    labels, _probs = get_model().predict(do_cut(text, **best_kwargs))
    # Strip by prefix length instead of the magic slice [9:].
    return labels[0][len(label_prefix):]


def _test_predict():
    """Spot-check predictions against the configured test samples."""
    for text, y_true in zip(conf.test_texts, conf.test_results):
        result = predict(text.strip())
        is_correct = result == y_true
        print(f'predict text={text[:100]}\nresult={result}\tcorrect={is_correct}')


def _show_model_param():
    """Print selected parameters of the loaded model."""
    params = {'dimension': get_model().get_dimension()}
    print(f'params={params}')


if __name__ == '__main__':
    # Full pipeline: segmenter sanity checks, preprocessing comparison,
    # final training, then model inspection and prediction spot-checks.
    _test_thulac()
    _test_preproc_mul()
    _test_optimize()
    do_train()
    _show_model_param()
    _test_predict()
    pass
