import gevent
from gevent import monkey

monkey.patch_all()

import time
import json
import os
import csv
import multiprocessing

import requests

from .news_preprocess import process_news
from .utils import get_random_uuid, merge_files, split_file, remove_files

# Module-level HTTP setup.
# NOTE(review): assigning DEFAULT_RETRIES after import may not affect adapters
# that were already constructed with the old default — confirm this actually
# increases retries. (Original comment: "increase reconnect attempts".)
requests.adapters.DEFAULT_RETRIES = 5
s = requests.session()
# NOTE(review): requests.Session has no documented `keep_alive` attribute; this
# assignment probably has no effect — verify. (Original comment: "close extra
# connections".)
s.keep_alive = True

# Endpoint templates for fetching a news document by keyno; the theme endpoint
# is used as a fallback (see get_news_by_url).
qccdata_news_api = 'http://qccdata.qichacha.com/news/{}.json'
qccdata_theme_news_api = 'http://qccdata.qichacha.com/news/theme/{}.json'


def get_latest_news_keynos(num=1000000):
    """
    Fetch up to *num* latest filtered news key numbers from the internal
    news-list service.

    Besides this MySQL-backed service, the list can also be obtained by
    querying the hive table ods_company_extend_os.company_news_os.

    Parameters
    ----------
    num : int
        Maximum number of keynos to request.

    Returns
    -------
    list
        News key numbers from the service response's 'keynos' field.
    """
    print('Fetching news keynos..')
    start = time.perf_counter()
    # A request without a timeout can hang forever on a stalled connection;
    # 60s is a generous bound for a bulk listing call.
    res = requests.get(
        'http://172.16.100.155:5000/fetch_latest_filtered_news?limit=%d' % num,
        timeout=60,
    ).json()
    keynos = res['keynos']
    consume = time.perf_counter() - start
    print('Got %d news keynos: %fs' % (len(keynos), consume))
    return keynos


def get_news_keyno_from_hive(fp):
    """
    Load news key numbers from a hive-exported CSV file.

    Parameters
    ----------
    fp : str
        Path to a CSV file with a header row containing a 'newsid' column.

    Returns
    -------
    list
        Every value of the 'newsid' column, in file order.
    """
    print('Fetching news keynos from %s..' % fp)
    begin = time.perf_counter()
    with open(fp, encoding='utf-8') as handle:
        keynos = [record['newsid'] for record in csv.DictReader(handle)]
    elapsed = time.perf_counter() - begin
    print('Got %d news keynos: %fs' % (len(keynos), elapsed))
    return keynos


def generate_news_keyno_from_hive(fp):
    """
    Lazily yield the first CSV column of every row in *fp*.

    NOTE(review): unlike get_news_keyno_from_hive, no header row is skipped —
    confirm the hive export file has none.
    """
    with open(fp, encoding='utf-8') as handle:
        for record in csv.reader(handle):
            yield record[0]


def get_news_by_url(news_keyno):
    """
    Download one news document by its keyno.

    Tries the plain news endpoint first and falls back to the theme-news
    endpoint on a 404.

    Parameters
    ----------
    news_keyno : str
        News key number used to format the API URLs.

    Returns
    -------
    dict or None
        {'keyno': ..., 'text': 'Title\\n\\nContent'} on success; None on any
        failure (the error is printed and the item is skipped by callers).
    """
    try:
        url = qccdata_news_api.format(news_keyno)
        # A timeout prevents a greenlet from hanging forever on a stalled
        # server; failures fall through to the except below.
        response = requests.get(url, timeout=30)
        if response.status_code == 404:
            url = qccdata_theme_news_api.format(news_keyno)
            response = requests.get(url, timeout=30)
        data = json.loads(response.content)
        data['Content'] = process_news(data['Content'], do_lowercase=False)
        text = '\n\n'.join([data['Title'], data['Content']])
        row = {'keyno': data['Id'], 'text': text}
        return row
    except Exception as ex:
        # Best-effort by design: log and skip (network errors, bad JSON,
        # missing fields all end up here).
        print(ex)
        return


def batch_get_news_by_oss(keynos, concurrency_count=250, output='data.jsonl', ret_res=False,
                          index_counter_start=None, index_counter_end=None):
    """
    Download news concurrently with gevent greenlets, appending one JSON
    object per line to *output*.

    Parameters
    --------------
    keynos : list or generator or str
        News keynos; a str is treated as a CSV file path and streamed via
        generate_news_keyno_from_hive.
    concurrency_count : int
        Number of greenlets spawned before each join/flush.
    output : str
        Output JSON-lines file (opened in append mode).
    ret_res : bool
        Whether to also return every fetched result (including None for
        failed items, matching the original behavior).
    index_counter_start : int, optional
        1-based index of the first keyno to process; earlier ones are skipped.
    index_counter_end : int, optional
        1-based index after which iteration stops.

    Returns
    -------
    list or None
        All batch results when ret_res is True, otherwise None.
    """
    print('Fetching newses..')
    start = time.perf_counter()
    tasks = []
    results = []
    index_counter = 0
    counter = 0
    if isinstance(keynos, str):
        keynos = generate_news_keyno_from_hive(keynos)
    # `with` guarantees the output file is closed even if an exception is
    # raised mid-loop (the original leaked the handle in that case).
    with open(output, 'a', encoding='utf-8') as fo:

        def _flush():
            """Join the pending greenlets, write successful rows, collect results."""
            nonlocal counter
            batch = [g.get() for g in gevent.joinall(tasks)]
            for row in batch:
                if row is not None:
                    counter += 1
                    fo.write(json.dumps(row, ensure_ascii=False) + '\n')
            if ret_res:
                results.extend(batch)

        for keyno in keynos:
            index_counter += 1
            if index_counter_start and index_counter < index_counter_start:
                print('\rSkip %d news..' % index_counter, end='')
                continue
            if index_counter_end and index_counter > index_counter_end:
                break
            tasks.append(gevent.spawn(get_news_by_url, keyno))
            if len(tasks) == concurrency_count:
                _flush()
                tasks = []
            print('\r%d/%fs..' % (counter, time.perf_counter() - start), end='')
        if tasks:
            _flush()
            print('\r%d/%fs..' % (counter, time.perf_counter() - start), end='')
    consume = time.perf_counter() - start
    print('Got %d newses saved in %s: %fs' % (counter, output, consume))
    if ret_res:
        return results


def batch_download_dataset(news_keynos_fp, each_count, output):
    """
    Split the keyno list file into chunks of *each_count* lines, download
    each chunk in its own process, then merge the per-process outputs.

    Parameters
    ----------
    news_keynos_fp : str
        Path to the file listing news keynos.
    each_count : int
        Number of keynos per sub file / worker process.
    output : str
        Path of the merged output file.

    Returns
    -------
    tuple
        (output path, list of per-process output paths — kept on disk for
        downstream use).
    """
    begin = time.perf_counter()
    prefix, suffix = os.path.splitext(output)
    prefix = '%s-%s' % (prefix, get_random_uuid())
    print('Split %s..' % news_keynos_fp)
    sub_fps = split_file(news_keynos_fp, each_count=each_count)
    print('Fetching news from %d sub files..' % len(sub_fps))
    sub_outputs = ['%s-%d%s' % (prefix, idx, suffix) for idx in range(len(sub_fps))]
    workers = [multiprocessing.Process(target=batch_get_news_by_oss,
                                       args=(sub_fp,),
                                       kwargs={'output': sub_out})
               for sub_fp, sub_out in zip(sub_fps, sub_outputs)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print('Merging news from %d sub output..' % len(sub_outputs))
    merge_files(sub_outputs, output)
    print('Removing sub files..')
    remove_files(sub_fps)
    # Keep the per-process outputs on disk for downstream consumers.
    # remove_files(sub_outputs)
    print('Done! Consume: %fs' % (time.perf_counter() - begin))
    return output, sub_outputs
