import time
import traceback
import os
import sys
import json
from multiprocessing import Process, Queue, Pool, Manager, cpu_count
from html_downloader import HtmlDownloader
from url_manager import UrlManager
from html_parser import HtmlParser
from html_outputer import HtmlOutputer
from dbpool import install
from .exceptions import ConfigValueMissingException

############
# Default upper bound for the multiprocessing queues, used when
# gen_conf omits an explicit "queue_num" value.
QUEUE_NUM_DEFAULT = 10000

############
# Module-level bookkeeping globals (not referenced by CrawlBaseHandle
# in this file — presumably used by subclasses or siblings; verify).
NOT_FOND = []  # NOTE(review): likely a typo for NOT_FOUND — confirm callers before renaming
TOTAL_NUM = 0
this_num = 0


class CrawlBaseHandle(object):
    """Base crawler driver.

    Combines a URL manager, HTML downloader, HTML parser and data saver
    and runs them in a pool of worker processes: several crawl processes
    consume URLs and produce parsed records, while one or more save
    processes persist those records.  Subclasses configure a concrete
    crawler by overriding the ``*_conf`` class attributes below.
    """

    crawlname = ''  # human-readable crawler name, returned by __str__

    # General runtime configuration.
    gen_conf = {
        "crawl_process_num": 4,  # number of crawler processes
        "save_process_num": 1,  # number of save processes
        "queue_num": 10000,   # queue capacity
    }
    # Database pools handed to dbpool.install() on start().
    database_conf = {
        # local root database
        'crawl': {
            'engine': 'pymysql',   # db type, eg: mysql, pymysql, sqlite
            'host': '127.0.0.1',    # db host
            'port': 3306,  # db port
            'user': 'root',   # db user
            'passwd': 'shuiqing',   # db password; NOTE(review): hard-coded credential, move to env/config
            'db': 'crawl',    # db name
            'charset': 'utf8',  # db charset
            'conn': 16,  # db connections in pool
        },
    }

    # Where the seed URLs come from.
    url_conf = {
        # "type" is one of: fixed, database
        "type": "database",
        "db_conf": {
            'database': 'crawl',
            'table': 'zybd_jibing',  # db table
            'fields': 'url',  # db field
            'other': ''
        },
        "value": "url",  # when type is "fixed", this url is used as the base request url
        # String substitutions applied to each url (via re.sub).
        "replace": {
            r'^': "http://127.0.0.1:8080"
        },
    }
    # Fields the parser extracts from each downloaded page.
    parse_html_conf = {
        "department_id": {
            'type': 'html',
            'target': 'li_list',
            'parse_conf': [
                ('class', 'ul-ss-3 jb-xx-ks', '1'),
                ('inner', 'li', '0'),
            ],
            'value_map': {
                # from_db, from_file, from_value
                "type": "from_db",
                "db_conf": {
                    'database': 'crawl',
                    'table': 'zybd_department',  # db table
                    'fields': ('name', 'id'),
                },
                "file_path": "",
                "key_field": "name",
                "value_field": 'id',
            }
        },
        "jibing_id": {
            # type: database,  html, value
            "type": "database",
            "db_conf": {
                'database': 'crawl',
                'table': 'zybd_jibing',  # db table
                'fields': 'id',
                'where': {"url": "&1&"},  # db field
                'replace': {
                    '&1&': 'url',
                    'http://127.0.0.1:8080': ''
                }
            },
            # String substitutions applied to the url.
            "value": 'id',
            "replace": {}
        }
    }
    # Download configuration.
    html_down_conf = {
        "user_agent": "&random&",  # User-Agent to send; "&random&" means pick one at random
        "add_header": {},  # extra headers to add to each request
        "sleep": 0,  # seconds to sleep between two downloads
    }
    # How follow-up urls are built from parsed fields.
    parse_url_conf = {
        "crawl_field": "url",  # parsed field holding the url
        "base_url": "",  # host/ip prefix to prepend
        "url_replace": []  # string substitutions applied to the url
    }
    # How parsed records are persisted.
    save_conf = {
        "type": "db_insert",  # one of: "file", "db_insert", "db_update"
        "db_conf": {    # used for the db_* types: where records are written
            'database': 'crawl',
            'table': 'zybd_jibing_department',  # db table
        },
        "file_conf": {  # used for the "file" type; file name is generated automatically
            "filepath": "./aa.txt"  # path of the file to write
        },
        "field_conf": {
            "jibing_id": {
                'type': 'from_parse_field',
                'field': "jibing_id"
            },
            "department_id": {
                'type': "from_parse_field",
                'field': "department_id"
            }
        },
        'list_field': 'department_id'
    }

    def __init__(self):
        """Build the pipeline components from the class-level configs."""
        self.urlmanager = UrlManager(self.url_conf)
        self.downloader = HtmlDownloader(self.html_down_conf)
        self.parser = HtmlParser(self.parse_html_conf, self.parse_url_conf)
        self.saver = HtmlOutputer(self.save_conf)

    def __str__(self):
        return self.crawlname

    def crawl_process(self, url_q, save_q, had_stop_q, processname):
        """Worker loop: download, parse and fan out URLs until ``url_q`` drains.

        Parsed records are JSON-encoded onto ``save_q``; newly discovered
        urls go back onto ``url_q``.  On exit the process registers its name
        on ``had_stop_q`` so the save processes know when all crawlers stopped.

        NOTE(review): with several crawl processes, ``url_q.empty()`` can be
        momentarily true while a sibling is still parsing (and about to add
        new urls), so a worker may exit early — confirm this is acceptable.
        """
        print("Starting processName: %s pid: %s" % (processname, str(os.getpid())))
        while not url_q.empty():
            try:
                # Fetch a url; may raise queue.Empty if a sibling won the race.
                url = url_q.get(timeout=2)
                print('processname:%s %s: %s' % (processname, url_q.qsize(), url))
                # Download the page.
                html_cont = self.downloader.download(url)
                # Parse into follow-up urls plus a data record.
                new_url_list, save_data = self.parser.parse(url, html_cont)
                # Hand the record to the save processes.
                if save_data:
                    save_data = json.dumps(save_data)
                    save_q.put(save_data)
                # Enqueue any newly discovered urls.
                if new_url_list:
                    for u in new_url_list:
                        url_q.put(u)
            except Exception as e:
                # A failing url must not kill the worker; log and continue.
                print(traceback.format_exc())
                print(e)
        had_stop_q.put(processname)
        print("Stop processName: %s pid: %s" % (processname, str(os.getpid())))

    def save_process(self, save_q, had_stop_q, processname):
        """Consumer loop: persist JSON records taken from ``save_q``.

        Runs until every crawl process has registered on ``had_stop_q``
        AND the save queue has been fully drained.
        """
        print("Starting processName: %s pid: %s" % (processname, str(os.getpid())))
        while True:
            if save_q.empty():
                # Crawlers push data before registering on had_stop_q, so
                # "all stopped + queue empty" means nothing more will come.
                if had_stop_q.qsize() >= self.gen_conf['crawl_process_num']:
                    break
                time.sleep(1)
                continue
            try:
                # BUGFIX: get()/loads() moved inside the try block.  The
                # empty() check above races with sibling consumers, so
                # get(timeout=2) can raise queue.Empty (and loads() can raise
                # on bad data); previously these escaped the loop and
                # silently killed the worker inside the pool.
                save_data = save_q.get(timeout=2)
                save_data = json.loads(save_data)
                print('save data: %s' % save_data)
                if save_q.empty():
                    # Queue drained: ask the saver to flush immediately.
                    self.saver.save(save_data, now=True)
                else:
                    self.saver.save(save_data)
            except Exception as e:
                print(traceback.format_exc())
                print(e)

    def start(self):
        """Validate configuration, then run the crawl/save process pool."""
        try:
            # Validate configuration before spawning anything.
            self.verify_conf()
            if self.database_conf:
                install(self.database_conf)
            crawl_names = ["Craw Process-" + str(i + 1) for i in range(self.gen_conf['crawl_process_num'])]
            save_names = ["Save Process-" + str(i + 1) for i in range(self.gen_conf['save_process_num'])]
            # Seed the url queue.
            self.urlmanager.init_url()
            # Manager queues are proxy objects, so they can be shared with
            # Pool workers (plain multiprocessing.Queue cannot).
            manager = Manager()
            queue_num = self.gen_conf.get('queue_num', QUEUE_NUM_DEFAULT)
            save_queue = manager.Queue(queue_num)
            had_stop_queue = manager.Queue(queue_num)
            print('Parent process %s.' % os.getpid())
            start_time = time.time()
            # One pool slot per worker so every process runs concurrently.
            pool = Pool(self.gen_conf['crawl_process_num'] + self.gen_conf['save_process_num'])
            # NOTE(review): apply_async results are discarded, so a worker
            # that fails to even start would die silently — consider
            # collecting the AsyncResults and calling .get() after join().
            for name in crawl_names:
                pool.apply_async(self.crawl_process, args=(self.urlmanager.url_queue, save_queue, had_stop_queue, name))
            for name in save_names:
                pool.apply_async(self.save_process, args=(save_queue, had_stop_queue, name))
            print('Waiting for all subprocesses done...')
            pool.close()
            pool.join()
            print(time.time() - start_time)
            print('All subprocesses done.')
        except Exception as e:
            print(traceback.format_exc())
            print(e)

    def verify_conf(self):
        """Raise ConfigValueMissingException for any unset required gen_conf key."""
        for key in ("crawl_process_num", "save_process_num", "queue_num"):
            if self.gen_conf.get(key) is None:
                raise ConfigValueMissingException(key)

if __name__ == "__main__":
    CrawlBaseHandle().start()
