# -*- coding: utf-8 -*-
import logging
import socket
import sys
import threading
import time
import traceback
import uuid
from datetime import datetime
from os import popen, getpid, path
from threading import Event

import oss2
import redis
import six.moves.urllib_parse as parse

from plugin.configs import configs, env
from plugin.db.mongodb_utils import MongoDBTool
from plugin.utils.check_files_utils import CheckFileIntergtiry
from plugin.utils.common_utils import get_today_str, utils_mongo_time, get_time, \
    load_resault, dumps_resault
from plugin.utils.process_utils import GracefulKiller
from plugin.utils.remote_utils import HttpClient as http_request

# Shared Redis client: used for the task queues, liveness heartbeats and
# the various "done" counters below.
redis_db = redis.StrictRedis(**configs['redis'])

# Mongo handle for the task-trace collection (name in ``mongo_log_task``).
mongodb_or = MongoDBTool(uri=configs['mongo_or']['mongo_uri'], db=configs['mongo_or']['mongo_db'])

# Redis hash-key prefixes for per-host / per-task-seq / per-worker counters
# (the latter two are suffixed with the current date when used).
redis_done_host = 'task_host'
redis_done_taskseq = 'task_seq'
redis_done_pid = 'task_pid'
# Mongo collection name where each task's processing trace is stored.
mongo_log_task = configs['mongo_log_task']
api_host = configs.get('log_api_host')  # NOTE(review): read here but not used in this module
# Aliyun OSS bucket used by ``aliyun_update_file`` for uploads.
aliyun_config = configs['aliyun_oss']
aliyun_bucket = oss2.Bucket(
    oss2.Auth(aliyun_config['key'], aliyun_config['secret']),
    aliyun_config['endpoint'], aliyun_config['bucket'])


class RedisConsumerThreadPool(object):
    """Supervises a pool of consumer threads that read tasks from Redis.

    ``start`` spawns ``num`` worker threads running :func:`run`, then acts
    as a watchdog: it refreshes a liveness heartbeat in Redis while workers
    are alive and restarts any thread that died.  Setting ``kill_now`` on
    the supplied killer ends supervision; the shutdown loop then waits for
    the remaining workers to exit.
    """

    def __init__(self):
        # index -> Thread; mutated by the watchdog loop in start().
        self.pools = {}

    def start(self, killer, queue=configs['redis_table'], num=configs['max_process_count'], max_retry=3):
        """Start ``num`` workers consuming ``queue`` and supervise them until killed.

        :param killer: object exposing a ``kill_now`` flag (GracefulKiller).
        :param queue: Redis list name the workers consume from.
        :param num: number of worker threads to launch.
        :param max_retry: per-task retry budget passed through to ``run``.
        """
        logging.info('main process started........')
        for index in range(num):
            t = threading.Thread(target=run, name=str(index), args=(killer, str(index), queue, max_retry))
            t.start()
            logging.info('thread[' + str(index) + '] started..........')
            self.pools[index] = t
        hostname = get_hostname()
        while len(self.pools) > 0:
            alive_count = 0
            if killer.kill_now:
                break
            # Sweep the workers; iterate over a snapshot because we mutate
            # self.pools while restarting dead threads.
            for index, t in dict(self.pools).items():
                if killer.kill_now:
                    break
                name = t.name  # Thread.getName() is a deprecated alias
                # Thread.isAlive() was removed in Python 3.9; use is_alive().
                if t.is_alive():
                    alive_count += 1
                    # Heartbeat: record "this host is alive" with a timestamp.
                    redis_db.hset(configs['redis_alive_hset'], hostname, str(datetime.now()))
                else:
                    # Replace the dead worker with a fresh thread on the same
                    # slot, keeping the slot index as the thread name.
                    self.pools.pop(index)
                    logging.error(('process[%d] %s dead!' % (index, name)))
                    t = threading.Thread(target=run, name=str(index), args=(killer, str(index), queue, max_retry))
                    t.start()
                    self.pools[index] = t
            time.sleep(2)
        # Shutdown phase: poll until every worker has noticed kill_now and exited.
        while self.pools:
            for index, t in dict(self.pools).items():
                name = t.name
                if t.is_alive():
                    logging.error('killed error process[{}-{}] isAlive'.format(index, name))
                else:
                    del self.pools[index]
            time.sleep(2)
        logging.info('main process was killed gracefully :)')


def get_hostname():
    """Return this machine's hostname (no trailing newline).

    Uses ``socket.gethostname()`` instead of forking a shell via
    ``popen('hostname')`` — same result, no subprocess, no pipe to leak.
    """
    return socket.gethostname()


def run(killer, pname, consumer_queue=configs['redis_table'], max_retry=3):
    """Worker loop: pop serialized tasks from ``consumer_queue`` and process each.

    Runs until ``killer.kill_now`` is set.  Payloads that cannot even be
    deserialized go straight to the ``*_fail`` queue; tasks whose processing
    fails are re-pushed (with an incremented ``retry`` counter) until
    ``max_retry`` is exceeded, after which they also land on the fail queue.

    :param killer: GracefulKiller-like object polled via ``kill_now``.
    :param pname: worker name, used in log lines and done-counters.
    :param consumer_queue: Redis list to consume (default taken from config
        once, at import time).
    :param max_retry: retry budget per task before giving up.
    """
    hostname = get_hostname()
    _event = Event()  # NOTE(review): created but never used — looks vestigial
    while True:
        # Outer loop guards against dropped connections and the like: any
        # exception escaping the inner loop is logged and consumption restarts.
        if killer.kill_now:
            break
        try:
            # Failures are parked on "<queue>_fail"; if we are already
            # consuming a fail queue, reuse it as-is.
            if consumer_queue.endswith('_fail'):
                failed_queue = consumer_queue
            else:
                failed_queue = consumer_queue + '_fail'

            while True:
                if killer.kill_now:
                    break

                raw_data = redis_db.lpop(consumer_queue)
                if raw_data is None:
                    logging.info('p[' + pname + '] queue is empty, waiting......')
                    time.sleep(1)
                    continue

                try:
                    data = load_resault(raw_data)
                except Exception as ex1:
                    # Unrecoverable: payload cannot be deserialized — park it
                    # on the fail queue for manual inspection.
                    redis_db.rpush(failed_queue, raw_data)
                    traceback.print_exc(file=sys.stdout)
                    logging.error(ex1)
                    continue
                try:

                    # Record a processing trace for this task in Mongo.
                    data_key = data.get('key')
                    if data_key is None:
                        data_key = data['item']['src_id']
                    data['is_done'] = 0
                    mongodb_or.save_mongodb_time(data, {'key': data_key}, mongo_log_task)
                    data['retry'] = data.get('retry', 0)
                    if data['retry'] > max_retry:
                        # Retry budget exhausted — give up on this task.
                        logging.info('max_rerey {} ,give up. {}'.format(max_retry, data_key))
                        redis_db.rpush(failed_queue, raw_data)
                        continue
                    data['retry'] = data['retry'] + 1
                    data['client'] = hostname
                    data['pname'] = pname
                    pid = get_exec_id()
                    data['pid'] = pid
                    data['is_done'] = 0
                    result = handle_item(data)
                    if not result[0]:
                        # Failure: drop the trace record and requeue for retry.
                        data.update({'reason': result[1]})
                        mongodb_or.delete_data({'key': data_key}, mongo_log_task)
                        repush_data(data, consumer_queue)
                    else:
                        data['is_done'] = 1
                        is_china_timezone = data.get('is_china_timezone', True)
                        mongodb_or.save_mongodb_time(data, {'key': data_key}, mongo_log_task, is_china_timezone)
                        send_msg(data['item'])
                        redis_db.hincrby(redis_done_host + '_done_count', hostname)
                        # Counter keyed per day so sorted output stays tidy.
                        redis_db.hincrby(redis_done_pid + '_' + get_today_str(),
                                         hostname + '_' + str(pid) + "_" + str(pname))
                except Exception as ex1:
                    # Any processing error: requeue the (possibly annotated)
                    # task and keep consuming.
                    repush_data(data, consumer_queue)
                    traceback.print_exc(file=sys.stdout)
                    logging.error(ex1)
        except Exception as ex:
            traceback.print_exc(file=sys.stdout)
            logging.error(ex)
    logging.info('p[' + pname + '] end of the program. was killed gracefully :)')


def send_msg(report):
    """Build an ingestion-notification message for ``report``.

    NOTE(review): the formatted message is assembled but never dispatched
    anywhere — presumably the sending step was removed or is still TODO.
    Formatting errors are logged (keyed by ``trace_id``) and swallowed.
    """
    trace_id = report.get('trace_id', '')
    try:
        fields = (
            report.get('title', ''),
            report.get('time', ''),
            report.get('source_id', ''),
            report.get('source_url', ''),
        )
        msg = '文件入库。标题：{}，时间：{}，数据源编号：{}，source_url：{}'.format(*fields)
    except Exception as ex:
        logging.error('send msg failed:%s, message:%s' % (trace_id, ex))


def repush_data(input_data, queue):
    """Strip Mongo bookkeeping fields from ``input_data`` and push it back onto ``queue``.

    :param input_data: task dict (mutated in place); ``None`` is a no-op.
    :param queue: Redis list name to push the serialized payload to.
    """
    if input_data is None:
        return
    # Drop the fields Mongo added/updated so the requeued payload is clean.
    # pop(key, None) removes the key regardless of its value — the previous
    # truthiness check skipped keys holding falsy values (e.g. _id == 0).
    for stale_key in ('_id', 'create_time', 'update_time'):
        input_data.pop(stale_key, None)
    redis_db.rpush(queue, dumps_resault(input_data))


def get_exec_id():
    """Return a ``<pid>-<thread ident>`` string identifying the executing worker."""
    # threading.get_ident() equals current_thread().ident for the running
    # thread and avoids the deprecated camelCase currentThread() alias.
    return '%s-%s' % (getpid(), threading.get_ident())


def handle_callback_mongo(callback_mongo, item):
    """Write ``item`` (or a selection of its fields) to a caller-specified Mongo collection.

    :param callback_mongo: dict with ``mongo_url``/``mongo_db``/``mongo_col``
        (all required), optional ``key`` (query dict, defaults to matching
        ``item['file_url']``), optional ``fields`` (subset of item keys to
        write) and optional ``convert_tzone`` flag.  ``None`` is a no-op.
    :param item: the report dict to persist; ``item['file_url']`` must exist.
    """
    if callback_mongo is None:
        return
    mongo_url = callback_mongo.get('mongo_url', None)
    mongo_db = callback_mongo.get('mongo_db', None)
    mongo_col = callback_mongo.get('mongo_col', None)
    query_dict = callback_mongo.get('key', None)
    fields = callback_mongo.get('fields', None)
    convert_tzone = callback_mongo.get('convert_tzone', True)
    file_url = item['file_url']

    if mongo_url is None or mongo_db is None or mongo_col is None:
        logging.warning("has not passed correct mongo config#%s" % str(query_dict))
        return

    # Default query: locate the record by its file URL.
    if query_dict is None:
        query_dict = {'file_url': file_url}

    mongodb_tool = MongoDBTool(uri=mongo_url, db=mongo_db)

    if fields is None:
        # No field filter: upsert the whole item.
        result = mongodb_tool.save_mongodb_time(item, query_dict, mongo_col, convert_tzone)
    else:
        # Copy only the requested fields (dict membership test — no need to
        # materialize item.keys() into a list first).
        update_item = {field: item[field] for field in fields if field in item}
        update_item['update_time'] = datetime.now()
        result = mongodb_tool.save_mongodb_time(update_item, query_dict, mongo_col, convert_tzone)

    if not result:
        logging.warning('mongodb table has not record#%s' % str(query_dict))


def handle_item(kwargs):
    """
    Process one deserialized task dict: fetch/upload its file payload and
    run the optional Mongo write-back callback.

    :param kwargs: deserialized task dict; keys read here include ``item``,
        ``callback_mongo``, ``key``, ``save_type`` ('file' or 'text',
        default 'file'), ``source_id``/``source_identity``, ``task_seq``
        and ``crawl_time``.
    :return: ``(True, '')`` on success, ``(False, reason)`` on failure —
        any exception is converted into a failure tuple for the caller.
    """
    try:
        db = kwargs.get('db')  # NOTE(review): read but never used below
        table = kwargs.get('table')  # NOTE(review): read but never used below
        source_id = kwargs.get('source_id')
        source_identity = kwargs.get('source_identity')
        # Fall back to source_identity when no explicit source_id is given.
        if source_id is None:
            source_id = source_identity
        task_seq = kwargs.get('task_seq')
        data_key = kwargs.get('key')
        t1 = kwargs.get('crawl_time')
        if t1 is None:
            t1 = get_time()
        save_type = kwargs.get('save_type', 'file')
        item = kwargs['item']
        callback_mongo = kwargs['callback_mongo']
        logging.info('task begin # {} # {} # {}'.format(source_identity, save_type, data_key))

        if save_type == 'file' or save_type == 'text':
            result = process_file(kwargs, source_id, item, save_type)
            # Propagate upload/download failures to the caller for retry.
            if not result[0]:
                return False, 'upload file error#' + result[1]
            handle_callback_mongo(callback_mongo, item)

        t2 = get_time()
        logging.info('task finished # {} # {} # {} # {}'.format(source_identity, save_type, t2 - t1, data_key))
        # Bump today's per-task-seq done counter.
        redis_db.hincrby(redis_done_taskseq + '_' + get_today_str(), task_seq)
        return True, ""
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        logging.error(('other_exception %s,kwargs:%s' % (e, str(kwargs))))
        return False, 'other_exception %s' % e


def process_file(kwargs, source_id, item, save_type):
    """Obtain the task's payload (download or inline text) and hand it to ``uploader_file``.

    :param kwargs: full task dict (``extension``, ``requests_data``,
        ``content_type``, ``key`` and ``retry`` are read here).
    :param source_id: data-source id, used to build the default OSS path.
    :param item: report dict; updated in place with download metadata on success.
    :param save_type: ``'file'`` downloads ``requests_data['url']``; anything
        else treats ``requests_data`` as the literal text payload.
    :return: ``(True, '')`` on success, ``(False, reason)`` otherwise.
    """
    extension = kwargs.get('extension')
    if extension is None:
        return False, 'error setting kwargs[extension]'
    if not extension.startswith('.'):
        extension = '.' + extension

    requests_data = kwargs.get('requests_data')
    content_type = kwargs.get('content_type')
    data_key = kwargs.get('key')
    retry = kwargs.get('retry', 0)

    # Derive a default OSS path when the task did not supply one:
    # "<source_id>/<uuid3 of file_url><extension>" — deterministic per URL,
    # so re-downloads overwrite the same object.
    oss_path = item.get('oss_path')
    if oss_path is None or oss_path == '':
        oss_path = '%s/%s%s' % (source_id,
                                str(uuid.uuid3(uuid.NAMESPACE_DNS, str(item['file_url']))), extension)
    # Test-environment objects get a "test_" prefix so they can be purged in bulk.
    if env == 'test':
        oss_path = 'test_' + oss_path

    # 'file' means the payload must be fetched from a remote URL.
    is_remote_file = (save_type == 'file')

    if not is_remote_file:
        # Inline text payload: upload the encoded bytes directly.
        data = requests_data.encode()
    else:
        # Standard remote file download.
        if not requests_data.get('method'):
            requests_data['method'] = 'get'
        url = requests_data.get('url')
        if not url.startswith('http'):
            # Relative URL: resolve it against the item's source_url and
            # record the fixed absolute URL back on the task.
            item = kwargs.get('item')
            if item:
                source_url = item.get('source_url')
                if source_url:
                    fix_url = parse.urljoin(source_url, url)
                    requests_data['url'] = fix_url
                    kwargs['item']['file_url'] = fix_url

        if retry >= 2:
            # Back off a little on repeatedly failing downloads.
            logging.info('find:%s has retry time>=2,sleep 5 seconds' % str(data_key))
            requests_data.update({'sleep_time': 5})
        data, fetch_res = http_request().get_response(**requests_data)
        logging.info('request finished # {}'.format(data_key))
        kwargs.update(fetch_res)
        if data is None:
            logging.error('requests download fail # {}'.format(data_key))
            return False, 'requests download fail'
        if not CheckFileIntergtiry().check(data.content, extension):
            logging.error('check file intergtiry fail # {}'.format(data_key))
            return False, 'check file intergtiry fail'

    # Upload the PDF (or other) payload to storage.
    logging.info('uploader_file begin # {}'.format(data_key))
    (downloaded, length, msg) = uploader_file(data, oss_path, content_type, is_remote_file)
    logging.info('uploader_file finished # {}'.format(data_key))
    if downloaded:
        # Success: annotate the item so it gets persisted downstream.
        item.update({'downloaded': downloaded, 'oss_path': oss_path, 'file_size': length,
                     'downloaded_time': utils_mongo_time(datetime.now())})
        return True, ''
    else:
        logging.warning('fail to process record#%s, %s' % (str(data_key), msg))
        return False, msg


def uploader_file(data, oss_path, content_type, is_remote_file=True):
    """Persist a payload either to Aliyun OSS or to a local temp directory.

    :param data: response object (remote download) or raw bytes (inline text).
    :param oss_path: target object key on OSS (also used for the local filename).
    :param content_type: declared content type from the task config.
    :param is_remote_file: True when ``data`` is an HTTP response object.
    :return: ``(success, byte_length, message)`` tuple.
    """
    is_download = configs['downloaded']
    # Remote downloads arrive as a response object; text payloads are raw bytes.
    if is_remote_file:
        content = data.content
    else:
        content = data

    # edb test data is force-uploaded so it can be exercised against the live bucket.
    if oss_path.startswith('test_edb'):
        is_download = True
    if is_download:
        # Production path: upload to Aliyun OSS.
        if not data:
            return (False, 0, 'download failed')
        if 'pdf' in content_type.lower():
            # Sanity-check the *response* header: servers that refuse a
            # download often answer with an HTML error page instead of a
            # PDF. application/octet-stream is acceptable, e.g.
            # https://www.iyiou.com/intelligence/download?id=540
            header_content_type = data.headers.get('Content-Type')
            # Bug fix: the original re-tested the request ``content_type``
            # here (already known to contain 'pdf'), so this guard almost
            # never fired; it also crashed on a missing response header.
            header_ct = (header_content_type or '').lower()
            if 'pdf' not in header_ct and 'stream' not in header_ct:
                msg = 'fail to download,oss_path:%s,Content_Type:%s,response_content_type:%s' % (
                    str(oss_path), content_type, header_content_type)
                logging.warning(msg)
                return False, 0, msg
            if len(data.content) <= 100:
                # Tiny bodies are almost certainly error pages, not PDFs.
                msg = 'size is too less, oss_path:%s,size:%d' % (oss_path, len(data.content))
                logging.warning(msg)
                return False, 0, msg
        downloaded, length, msg = aliyun_update_file(content, oss_path, content_type)
        return downloaded, length, msg
    else:
        if not data:
            logging.info(('null data', oss_path))
            return False, 0, 'download failed'

        # Offline/dev environment: keep the file locally instead of uploading.
        tempdir = configs['download_temp_dir']
        local_file = '%s/%s' % (tempdir, oss_path.replace('/', ''))
        with open(local_file, 'wb') as f:
            f.write(content)
        local_size = path.getsize(local_file)
        return True, local_size, 'download done'


def aliyun_update_file(data, filename, content_type=None):
    """Upload ``data`` to Aliyun OSS under ``filename`` and verify the stored size.

    :param data: file bytes to upload.
    :param filename: OSS object key to store under (overwritten if present).
    :param content_type: optional Content-Type header for the object.
    :return: ``(True, remote_length, 'upload done')`` on verified success,
        otherwise ``(False, 0, error_message)``.
    """
    if content_type:
        headers = {"Content-Type": content_type}
    else:
        headers = None
    msg = ''
    try:
        # Overwrite unconditionally; existence is deliberately not checked.
        result = aliyun_bucket.put_object(key=filename, data=data, headers=headers)
        if result.status != 200:
            msg = 'upload failed'
        else:
            # Verify the stored object holds exactly the bytes we sent.
            head_result = aliyun_bucket.head_object(filename)
            remote_length = head_result.content_length
            local_length = len(data)
            if local_length == remote_length:
                return True, remote_length, 'upload done'
            else:
                # Size mismatch: remove the truncated object so a retry
                # doesn't see a bogus "already uploaded" state.
                msg = 'upload file %s failed, size error %s/%s' % (filename, str(local_length), str(remote_length))
                aliyun_bucket.delete_object(filename)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        # Bug fix: Exception has no ``.message`` attribute on Python 3 —
        # the old ``e.message`` raised AttributeError inside this handler.
        msg = 'upload file %s failed, error %s' % (filename, e)
    logging.error(msg)
    return False, 0, msg


def start_consumer():
    """Entry point: install the graceful-shutdown handler and run the consumer pool."""
    killer = GracefulKiller()
    pool = RedisConsumerThreadPool()
    pool.start(killer)
