import time
import json
import os
import csv

from .utils import jieba_parallel_cut, jieba_batch_cut, jieba_cut


def read_jsonl_dataset(jsonl_fp):
    """Yield ``(keyno, text)`` pairs from a jsonl file, one record per line.

    Each line of *jsonl_fp* must be a JSON object with at least the
    ``'keyno'`` and ``'text'`` keys; a missing key raises ``KeyError``.
    """
    with open(jsonl_fp, encoding='utf-8') as handle:
        for raw_line in handle:
            record = json.loads(raw_line.strip())
            yield record['keyno'], record['text']


def convert_jsonl2txt(jsonl_fp, force=False):
    """Flatten a jsonl dataset into a plain-text file, one record's text per line.

    The output path is *jsonl_fp* with its ``.jsonl`` suffix replaced by
    ``.txt``. If the output already exists it is kept unless *force* is true.

    :param jsonl_fp: path to the input ``.jsonl`` file.
    :param force: overwrite an existing ``.txt`` output when true.
    :returns: path of the ``.txt`` file.
    :raises ValueError: if *jsonl_fp* does not contain ``.jsonl`` (the
        derived output path would equal the input and truncate it).
    """
    print('Converting jsonl dataset to txt dataset..')
    start = time.perf_counter()
    txt_fp = jsonl_fp.replace('.jsonl', '.txt')
    if txt_fp == jsonl_fp:
        # Guard: without a '.jsonl' suffix the replace() above is a no-op, and
        # open(txt_fp, 'w') below would truncate the *input* file.
        raise ValueError("expected a '.jsonl' file path, got %r" % jsonl_fp)
    if os.path.exists(txt_fp) and not force:
        print('%s already exist. Delete the file if you want to update it.' % txt_fp)
        return txt_fp
    counter = 0
    with open(txt_fp, 'w', encoding='utf-8') as fo:
        for keyno, text in read_jsonl_dataset(jsonl_fp):
            # Newlines inside a record would break the one-record-per-line format.
            fo.write('%s\n' % text.replace('\n', ' '))
            counter += 1
            if counter % 10000 == 0:
                print('\rConvert2txt: %d/%fs..' % (counter, time.perf_counter() - start), end='')
    # Final summary terminates the progress line with a newline (the original
    # used end='' here, leaving the cursor mid-line for the next print).
    print('\rConvert2txt: %d/%fs..' % (counter, time.perf_counter() - start))
    return txt_fp


def process_news_jsonl_corpus(news_jsonl_fp, save_fp, parallel=False, force=True, batch_cut=True, each_count=500000):
    """
    Turn a jsonl news dataset into a word-segmented corpus.

    The jsonl input is first flattened to a txt file, then segmented with one
    of the jieba helpers: parallel cut when *parallel* is set, otherwise batch
    cut when *batch_cut* is set (using *each_count* lines per batch), otherwise
    a plain single-pass cut. *force* is forwarded to the jsonl->txt conversion.
    """
    print('Preparing corpus..')
    txt_fp = convert_jsonl2txt(news_jsonl_fp, force)
    # Dispatch to the segmentation strategy; 'parallel' takes precedence.
    if parallel:
        jieba_parallel_cut(txt_fp, save_fp)
        return
    if batch_cut:
        jieba_batch_cut(txt_fp, save_fp, each_count)
        return
    jieba_cut(txt_fp, save_fp)


def process_kuaixun_csv_corpus(csv_fp, save_fp, each_count=10000):
    """Convert a kuaixun csv into a txt file and batch-segment it with jieba.

    Each csv row is written as ``"<feed_content>  <_c2>"`` on one line of the
    intermediate txt file (same directory, ``.csv`` replaced by ``.txt``),
    which is then fed to ``jieba_batch_cut``.

    :param csv_fp: path to the input csv; must have 'feed_content' and '_c2'
        columns (KeyError otherwise).
    :param save_fp: path for the segmented output.
    :param each_count: batch size forwarded to ``jieba_batch_cut``.
    """
    txt_fp = csv_fp.replace('.csv', '.txt')
    # Both files are managed by `with` so the txt output is flushed and closed
    # before jieba_batch_cut reads it (the original leaked the write handle,
    # so the cut step could see a partially-buffered file).
    with open(csv_fp, encoding='utf-8') as fi, open(txt_fp, 'w', encoding='utf-8') as fo:
        reader = csv.DictReader(fi)
        for row in reader:
            fo.write('%s  %s\n' % (row['feed_content'], row['_c2']))
    jieba_batch_cut(txt_fp, save_fp, each_count=each_count)
