#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
# import sys
import logging
from pymongo import MongoClient
from datetime import datetime
from crawler import get_links
from parser import job_parser
from crawler import process_crawler, threaded_crawler
from mongotool import MongoCache
from utilities import make_random_useragent, get_ipproxies


# Crawl entry point: all discovered links are matched against this host.
seed_url = 'https://www.lagou.com'

# Uncomment (together with the sys import above) for INFO-level logs on stderr.
# logging.basicConfig(stream=sys.stderr, level=logging.INFO)

# Randomized User-Agent so successive runs present different browser identities.
UA = make_random_useragent()
# Request headers sent with every download; mimics an AJAX browser request.
headers = {
    'User-Agent': UA,
    'Host': 'www.lagou.com',
    # NOTE(review): Origin is http:// while seed_url is https:// --
    # presumably accepted by the server, but confirm it should not be https.
    'Origin': 'http://www.lagou.com',
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'X-Requested-With': 'XMLHttpRequest'
}
# proxies = None  # set to None to crawl without IP proxies
proxies = get_ipproxies()


# URL filter function, used as the scrape callback of the crawler process


def scrape_callback(url, html):
    """[URL FILTER] Extract the links found in *html* and keep only those
    on the lagou.com host, for the crawler to download later.

    :param url: URL of the page that produced *html* (unused; kept for the
        callback interface expected by the crawler).
    :param html: raw HTML text of the downloaded page.
    :return: list of link URLs starting with https://www.lagou.com
    """
    # Raw string with escaped dots: the original pattern left '.' unescaped
    # (matching any character) and ended in a lazy '\S*?' that matched the
    # empty string, so it was effectively just a sloppy prefix check.
    link_regex = re.compile(r'https://www\.lagou\.com')
    # re.match anchors at the start of the string, so this keeps links whose
    # URL begins with the lagou.com host.  The original 'else' branch was
    # dead code: a compiled pattern object is always truthy.
    return [link for link in get_links(html) if link_regex.match(link)]


# URL-dispatch class, used as the download callback of the downloader


class DownloadCallback(object):
    """Download callback: when *url* is a lagou job-detail page, parse its
    HTML with ``job_parser`` and upsert the result into the ``lagou.jobs``
    MongoDB collection.

    All work happens in ``__init__`` because the crawler invokes the class
    itself as the callback: ``DownloadCallback(url, html)``.
    """

    # Compiled once at class-definition time instead of on every call.
    # Fixes in the pattern: the host dots are escaped (the original let '.'
    # match any character) and '\d+' requires at least one digit in the job
    # id (the lazy '\d*?' also accepted '/jobs/.html').
    JOB_URL_REGEX = re.compile(r'https://www\.lagou\.com/jobs/\d+\.html')

    def __init__(self, url, html, client=None):
        """
        :param url: URL the page was downloaded from.
        :param html: raw page source to parse.
        :param client: optional ``MongoClient``; when omitted, a client for
            MongoDB at the default localhost port is created.
        """
        self.client = MongoClient(
            'localhost', 27017, maxPoolSize=200, connect=False,) if client is None else client
        self.url = url
        self.html = html
        # A collection is the MongoDB equivalent of a relational table.
        self.db = self.client.lagou
        self.db.jobs.create_index('timestamp')
        # Only job-detail pages are parsed and stored.
        if self.JOB_URL_REGEX.match(self.url):
            result = job_parser(self.html)
            if result:
                # Key the document on its URL so re-crawls update in place.
                record = {'result': result, 'timestamp': datetime.now()}
                try:
                    # update_one replaces the deprecated Collection.update
                    # (the pymongo-3 kwargs used above imply update_one is
                    # available).
                    self.db.jobs.update_one(
                        {'_id': self.url}, {'$set': record}, upsert=True)
                except Exception as e:
                    logging.error(
                        '[run.py-DownloadCallback]>>mongodb stored failed -- error: %s', e)
            # else: job_parser found no data in the page -- nothing to store.


if __name__ == '__main__':
    # Parenthesized single-argument print behaves identically under
    # Python 2 and Python 3 (the original Python-2-only statements broke
    # under Python 3).
    print('Spider starting >>>')
    print('========================================')
    print('logging:')
    # MongoCache de-duplicates URLs that have already been downloaded.
    cache = MongoCache()
    threaded_crawler(seed_url, delay=1, cache=cache, scrape_callback=scrape_callback,
                     download_callback=DownloadCallback, headers=headers, proxies=proxies,
                     num_retries=1, max_threads=10, timeout=60)
    # Alternative multi-process entry point, kept for reference:
    # process_crawler(seed_url, num_process=2, delay=1, cache=cache, scrape_callback=scrape_callback,
    #                 download_callback=DownloadCallback, headers=headers, proxies=proxies, num_retries=1,
    #                 max_threads=10, timeout=60)
