#encoding:utf8


"""
itjuzi 根据 搜索分类来进行处理, 取得前十页的内容
"""

import time
import json
import re
import pdb
import traceback
from bs4 import BeautifulSoup as BS
from threading import Thread


from utils.SpiderDBApi import SpiderDBApi
from utils.SpiderUrlApi import SpiderUrlApi
from utils.UrlParserApi import UrlParserApi
from utils.gen_interval_time import get_interval
from utils.get_links import getLinks
from utils.tools import get_url_root
from utils.Exceptions import *

from logger import logger

from config import source,  opener_type, PRINT_INTERVAL, PARSER_INTERVAL
from config import start_urls, re_invalid_urls, re_valid_urls
from config import db_config, urldb_config, opener_config
from parsers import get_content
from check_page import check_error_page, check_login_page

# module-level clients: url queue server and content db server
urldb_server = SpiderUrlApi(urldb_config['host'], urldb_config['auth'])
db_server = SpiderDBApi(db_config["host"], db_config['auth'])

# opener = UrlParserApi(opener_config['host'], opener_config['auth'])
# NOTE: when login is required, the local Opener must be used instead of
# the remote UrlParserApi opener commented out above.
from opener import Opener
opener = Opener()

EXIT_CODE = 0            # set to 1 (by print_status) to ask the main loop to exit
SLEEP_TIME_FOR_URL = 10  # seconds to sleep when no url could be fetched
MAX_ERROR_FETCH_URL = 3  # max consecutive url-fetch failures before giving up
SLEEP_INTERVAL = None  # None: system human-like interval; 0: no sleep; other number: constant sleep time


def start():
    """Main spider loop.

    Starts the url-server queue, seeds it with the configured start urls,
    launches the status-printing daemon thread, then repeatedly: fetch a
    url, download the page, extract content and out-going links, persist
    both, and report status back to the url server.

    Raises:
        UrlFetchException: after MAX_ERROR_FETCH_URL consecutive failures
            to obtain a url from the url server.
    """
    # start spider url queue
    logger.warn("start url server queue ...")
    start_url_server_queue()

    # seed the url server with the configured start urls
    logger.warn("insert start urls ...")
    insert_start_urls()
    time.sleep(1)

    # start the status-printing thread (daemon so it dies with the process)
    monitor = SpiderMonitor()
    th = Thread(target=print_status, args=(monitor, ))
    th.daemon = True
    th.start()

    # main crawl loop
    while 1:
        if EXIT_CODE == 1:
            logger.warn("exit for exit_code = 1")
            return
        # fetch the next url record from the url server
        logger.debug("get url ... ")
        data = get_url()
        if data is None:
            monitor.error_fetch_url += 1
            if monitor.error_fetch_url > MAX_ERROR_FETCH_URL:
                raise UrlFetchException("{} times has tried".format(MAX_ERROR_FETCH_URL))
            if EXIT_CODE == 1:
                logger.warn("Exit for exit_code=1")
                return
            logger.warn("Sleep {0}s for getting url".format(SLEEP_TIME_FOR_URL))
            time.sleep(SLEEP_TIME_FOR_URL)
            continue
        monitor.error_fetch_url = 0
        monitor.cnt_fetch_url += 1  # FIX: counter existed but was never incremented

        url_id, url = data['id'], data['url']  # renamed: `id` shadowed the builtin
        base_url = get_url_root(url).strip("/")
        logger.debug("parse: id[{}], url[{}]".format(url_id, url))

        # download the page source
        logger.debug("get source ... ")
        content = get_source(url)
        # BUG FIX: previously tested `data is None`, which is never true here,
        # so failed downloads fell through to the parsing code below.
        if content is None:
            monitor.error_parse_url += 1
            try:
                urldb_server.update_status({"id": url_id, "status": -1})
            except KeyboardInterrupt:
                raise
            except Exception:
                logger.error("[UPDATE STATUS], can not update status to -1")
            continue

        try:
            # BUG FIX: these calls previously received the config constant
            # `source` (the spider's source name) instead of the downloaded
            # page `content`.
            check_error_page(url, content)
            check_login_page(url, content)

            bs = BS(content, 'lxml')
            if not bs:
                monitor.error_parse_url += 1
                continue

            # extract the structured data from the page
            logger.debug("get content ... ")
            results = get_content(url, content, bs)
            if results:
                logger.debug("save content ... ")
                save_content(results, monitor)

            # extract the page's out-going links
            links = getLinks(base_url, content)

            # queue the links that pass the configured url filters
            logger.debug("save links ... ")
            _cnt = save_links(links)
            monitor.cnt_insert_new_link += _cnt
            logger.debug("updated: {}".format(_cnt))

            # mark this url as done on the url server
            logger.debug("update status ... ")
            update_url_status(url_id)

            # throttle: None -> human-like interval, 0 -> no sleep,
            # any other number -> constant sleep
            if SLEEP_INTERVAL is None:
                time.sleep(get_sleep_time())
            elif SLEEP_INTERVAL == 0:
                pass
            else:
                time.sleep(SLEEP_INTERVAL)

        except LoginException:
            logger.warn("[LOGIN], need login ... sleep for 100 seconds")
            # FIX: was `urldb_server.update(...)`; use update_status for
            # consistency with every other status report in this file
            urldb_server.update_status({"id": url_id, "status": -1})
            time.sleep(100)
            opener.login()
        except KeyboardInterrupt:
            # requeue the url (status 0) so it is retried on the next run
            urldb_server.update_status({"id": url_id, "status": 0})
            raise
        except Exception:
            traceback.print_exc()
            urldb_server.update_status({"id": url_id, "status": -1})


def print_status(monitor):
    """Periodically print the monitor's status line; runs in a daemon thread.

    Sets the module-level EXIT_CODE to 1 on KeyboardInterrupt so the main
    loop can shut down cleanly.

    Args:
        monitor: a SpiderMonitor whose status() is printed every
            PRINT_INTERVAL seconds.
    """
    global EXIT_CODE
    while 1:
        try:
            print(monitor.status())
            time.sleep(PRINT_INTERVAL)
        except KeyboardInterrupt:  # FIX: py2-only `except X, ex` syntax; ex was unused
            EXIT_CODE = 1
            raise


def start_url_server_queue():
    """Ask the url server to start its queue; raise if it reports failure."""
    resp = urldb_server.start_queue()
    code = resp['code']
    msg = resp['msg']
    if code != 0:
        raise Exception("Url Server Queue can not started: code: {}, msg: {}, data: {}".format(
            code, msg, resp['data']
        ))
    print(msg)


def insert_start_urls():
    """Seed the url server with every configured start url."""
    for start_url in start_urls:
        print("insert: {}".format(start_url))
        urldb_server.insert(dict(id=start_url, url=start_url, source=source))


def get_url():
    """Pop the next url record from the url server.

    Returns the record dict on success, or None when the server reports
    a non-zero code.
    """
    resp = urldb_server.get()
    if resp['code'] != 0:
        return None
    return resp['data']


def get_source(url):
    """Download `url` through the opener.

    Returns the page source on success, or None when the opener reports
    a non-zero code.
    """
    resp = opener.get_source(url, by=opener_type)
    if resp['code'] != 0:
        return None
    return resp['data']


def save_content(results, monitor):
    """Bulk-insert the parsed results into the content db.

    Args:
        results: list of parsed records; may be empty or None.
        monitor: SpiderMonitor whose content-insert counter is updated.

    Returns:
        The number of records handed to the db server (0 for empty input).
    """
    # FIX: removed dead commented-out per-record insert loop and the unused
    # `cnt` local; guard against empty/None input (len(None) would raise).
    if not results:
        return 0
    db_server.insert_bulk(results)
    # FIX: the monitor was passed in but its counter was never updated
    monitor.cnt_insert_content += len(results)
    return len(results)


def save_links(links):
    """Filter `links` against the configured url patterns and bulk-insert
    the survivors into the url server.

    A link is kept when it matches some pattern in re_valid_urls (or when
    re_valid_urls is empty) and matches no pattern in re_invalid_urls.

    Args:
        links: iterable of url strings (duplicates are collapsed).

    Returns:
        The number of links inserted.
    """
    link_set = set()
    for link in links:
        # keep only links matching the valid patterns (if any are configured)
        if re_valid_urls and not any(re.match(p, link) for p in re_valid_urls):
            continue
        # drop links matching any invalid pattern
        if any(re.match(p, link) for p in re_invalid_urls):
            continue
        link_set.add(link)

    # FIX: removed dead commented-out per-link insert code
    valid_links = [dict(id=link, url=link) for link in link_set]
    if valid_links:  # FIX: skip the round-trip for an empty batch
        urldb_server.insert_bulk(valid_links)
    return len(valid_links)


def update_url_status(id, status=10):
    """Report a url's crawl status to the url server (default 10 = done)."""
    return urldb_server.update_status(dict(id=id, status=status))


def get_sleep_time():
    """Return a human-like sleep interval scaled by PARSER_INTERVAL."""
    # FIX: removed dead commented-out import (already imported at file top)
    return get_interval() * PARSER_INTERVAL


class SpiderMonitor(object):
    """Collects crawl counters and renders them as a one-line status string."""

    def __init__(self):
        # error counters
        self.error_fetch_url = 0             # consecutive failures getting a url
        self.error_parse_url = 0             # pages that failed to download/parse
        self.error_insert_content_error = 0  # failed content inserts

        # progress counters
        self.cnt_fetch_url = 0        # urls fetched from the url server
        self.cnt_insert_content = 0   # content records inserted
        self.cnt_insert_new_link = 0  # new links queued

        self.monitor = time.time()  # start timestamp, used for elapsed time

    def status(self):
        """Return a one-line summary of elapsed time and all counters."""
        t = time.time() - self.monitor
        # BUG FIX: placeholder was "{1)" which makes str.format raise ValueError
        s = "Time: {0}s, Parsed: {1}, Content: {2}, NewLink: {3}".format(t,
            self.cnt_fetch_url, self.cnt_insert_content, self.cnt_insert_new_link
        ) + " ; Error: Fetched: {0}, Parsed: {1}, Content Inserted: {2}".format(
            self.error_fetch_url, self.error_parse_url, self.error_insert_content_error
        )
        return s

# start the spider only when run as a script
# FIX: an unguarded start() would launch the whole crawl on import
if __name__ == "__main__":
    start()



