import glob
import multiprocessing
import os
import re
import shutil
import subprocess
import time
import uuid

import jieba

from . import independent_script_folder, stopwords_fp


def __get_stopwords():
    """Load the stopword list from ``stopwords_fp``, one word per line.

    :returns: set of stripped stopword strings.
    """
    with open(stopwords_fp, encoding='utf-8') as fh:
        return {word.strip() for word in fh}


# Module-level stopword set, loaded once at import time and shared by
# filter_words() below.
stopwords = __get_stopwords()


def jieba_parallel_cut(txt_fp, save_fp, cpu_count=20):
    """Segment *txt_fp* via the standalone ``jieba-parallel-cut.py`` script
    run in a subprocess, writing the result to *save_fp*.

    :param txt_fp: path of the raw text file to segment.
    :param save_fp: path the segmented output is written to.
    :param cpu_count: worker count forwarded to the script.
    :returns: *save_fp*, for call chaining.
    """
    start = time.perf_counter()
    # Argument list instead of an interpolated shell string: paths containing
    # spaces or shell metacharacters can no longer break (or inject into)
    # the command line. Failures are still best-effort, as with os.system.
    subprocess.run([
        './venv/bin/python',  # NOTE(review): relative path — assumes CWD is the project root; confirm
        os.path.join(independent_script_folder, 'jieba-parallel-cut.py'),
        txt_fp,
        save_fp,
        str(cpu_count),
    ])
    tm_cost = time.perf_counter() - start
    print('jieba parallel cut: %fs' % tm_cost)
    # NOTE(review): the parallel cut ran in the child process, so calling
    # disable_parallel() on this process's jieba looks like a no-op — confirm.
    jieba.disable_parallel()
    return save_fp


def filter_words(words):
    """Keep only tokens longer than one character that are not stopwords."""
    def _keep(token):
        return len(token) > 1 and token not in stopwords
    return list(filter(_keep, words))


def cut_doc(doc):
    """Segment *doc* with jieba and return the filtered token list."""
    tokens = jieba.cut(doc.strip())
    stripped = [token.strip() for token in tokens]
    return filter_words(stripped)


def jieba_cut(txt_fp, save_fp):
    """Segment every line of *txt_fp* with jieba and write the space-joined
    tokens to *save_fp*, one output line per input line.

    Prints a carriage-return progress line every 100 input lines and a
    final summary with the total line count and elapsed seconds.
    """
    begin = time.perf_counter()
    processed = 0
    with open(save_fp, 'w', encoding='utf-8') as out, \
            open(txt_fp, encoding='utf-8') as src:
        for line in src:
            out.write(' '.join(cut_doc(line)) + '\n')
            processed += 1
            if processed % 100 == 0:
                elapsed = time.perf_counter() - begin
                print('\rjieba cut: %d/%fs..' % (processed, elapsed), end='')
    print('jieba cut consume: %d/%fs' % (processed, time.perf_counter() - begin))


def jieba_batch_cut(txt_fp, save_fp, each_count=500000):
    """
    Segment a large text file in parallel batches.

    Splits *txt_fp* into chunks of at most *each_count* lines, runs
    :func:`jieba_cut` on each chunk in its own process, merges the
    per-chunk outputs into *save_fp*, and removes all intermediate files.

    :returns: tuple of (*save_fp*, list of the already-removed chunk
        output paths).
    """
    begin = time.perf_counter()
    prefix, suffix = os.path.splitext(save_fp)
    # Random UUID in the prefix keeps concurrent runs from colliding.
    prefix = '%s-%s' % (prefix, get_random_uuid())
    print('Split %s..' % txt_fp)
    chunk_fps = split_file(txt_fp, each_count=each_count)
    print('Fetching text from %d sub files..' % len(chunk_fps))
    chunk_outputs = ['%s-%d%s' % (prefix, i, suffix)
                     for i in range(len(chunk_fps))]
    workers = [
        multiprocessing.Process(target=jieba_cut, args=(chunk_fp, chunk_out))
        for chunk_fp, chunk_out in zip(chunk_fps, chunk_outputs)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print('Merging news from %d sub output..' % len(chunk_outputs))
    merge_files(chunk_outputs, save_fp)
    print('Removing sub files..')
    remove_files(chunk_fps)
    remove_files(chunk_outputs)
    print('Done! Consume: %fs' % (time.perf_counter() - begin))
    return save_fp, chunk_outputs


def get_random_uuid():
    """Return a fresh random UUID (version 4) as a string."""
    return '%s' % uuid.uuid4()


def split_file(fp, each_count=100000):
    """
    Split *fp* into numbered sub files of at most *each_count* lines each.

    Delegates to ``split -d --additional-suffix`` (GNU coreutils flags, so
    this requires a GNU environment). Sub files share the original name
    plus a random UUID, a numeric index, and the original extension.

    :returns: sorted list of generated sub-file paths.
    :raises subprocess.CalledProcessError: if ``split`` fails (previously
        a failure was silently ignored and an empty list returned).
    """
    uid = get_random_uuid()
    prefix_name, additional_suffix = os.path.splitext(fp)
    prefix_name = '%s-%s' % (prefix_name, uid)
    # Argument list instead of a shell string: file names with spaces or
    # shell metacharacters no longer break (or inject into) the command.
    subprocess.run(
        ['split', '-l', str(each_count), '-d',
         '--additional-suffix', additional_suffix, fp, '%s-' % prefix_name],
        check=True)
    files = glob.glob('%s-*%s' % (prefix_name, additional_suffix))
    # Raw string: '\d' inside a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, SyntaxWarning in 3.12).
    pattern = r'%s-\d+%s' % (re.escape(os.path.basename(prefix_name)),
                             re.escape(additional_suffix))
    files = sorted(f for f in files if re.search(pattern, f))
    return files


def merge_files(fps, output):
    """
    Concatenate the files in *fps* into *output*, in list order.

    Pure-Python replacement for the previous ``cat a b > out`` shell
    string: no shell is involved, so paths containing spaces or shell
    metacharacters are handled safely, and it works without ``cat``.
    """
    # Binary mode copies bytes verbatim, exactly like cat.
    with open(output, 'wb') as out:
        for fp in fps:
            with open(fp, 'rb') as src:
                shutil.copyfileobj(src, out)


def remove_files(fps):
    """Delete every file path in *fps* from the filesystem."""
    for doomed in fps:
        os.unlink(doomed)


def convert_sparse2dense(sparse_vector, size):
    """Expand an iterable of (index, value) pairs into a dense list of
    length *size*; positions not mentioned stay 0.0. When an index repeats,
    the last value wins.
    """
    lookup = dict(sparse_vector)
    return [lookup.get(i, 0.) for i in range(size)]
