# -*- coding: utf-8 -*-

import requests

from autoscrapy import config
from autoscrapy import linkfilter
from autoscrapy import crawl
from autoscrapy import extract
from autoscrapy import item
from autoscrapy.db import base as basev1
from autoscrapy.utils import log
from autoscrapy.utils import linksnode


LOG = log.get_logging(__name__)

class autoscrapy(object):
    """Crawl the web starting from ``start_url`` to a fixed depth of 3,
    auto-extracting the main content of every fetched page and handing
    each extracted item to the item store.
    """

    # Class-level defaults kept only for backward compatibility with any
    # external reader of these attributes.  Real per-crawl state lives in
    # the instance attributes assigned in __init__: the previous code
    # extended these class-level lists in place, so URL state leaked
    # between autoscrapy instances.
    ROOT_URLS = []
    SECOND_URLS = []
    THIRD_URLS = []

    def __init__(self, start_url=None):
        self.start_url = start_url

        # BUG FIX: per-instance lists so two crawls never share (and keep
        # growing) the same class-level link lists.
        self.ROOT_URLS = []
        self.SECOND_URLS = []
        self.THIRD_URLS = []

        # autoscrapy database context shared by all helpers below
        self.context = self.get_scrapy_context()

        # link filter used to drop duplicate links
        self.link_filter = linkfilter.linkFilter(self.context)

        # crawl used to fetch web contents
        self.crawl = crawl.crawl(self.context)

        # extract body / links from fetched pages
        self.extract = extract.extract(self.context)

        # item persists extracted content to the db
        self.item = item.item(self.context)

    def get_scrapy_context(self):
        """Build the shared context (db session + config module) that is
        handed to every helper component."""
        context = {
            'session': basev1.get_session(),
            'config': config,
        }
        return context

    def process_url(self, url, ret_urls):
        """Process a single URL:

        1. skip it when it was already seen (duplicate detection)
        2. record it and fetch the page
        3. collect all outgoing links into ``ret_urls`` (when not None)
        4. auto-extract the page's main content

        Returns the main-content dict tagged with the source URL under
        ``'fromsrc'``, or None when the URL was skipped, the fetch
        failed, or no content could be extracted.
        """
        main_content = None
        if self.link_filter.is_duplicate_url(url):
            return main_content

        self.link_filter.add_url(url)
        web_content = self.crawl.fetch(url)
        if not web_content:
            return main_content

        # get_all_links is called unconditionally (it may have side
        # effects in the extract helper); links are only propagated when
        # the caller asked for them.
        urls = self.extract.get_all_links(web_content)
        if ret_urls is not None:
            ret_urls.extend(urls)

        main_content = self.extract.extract_main_content(web_content)
        if main_content:
            main_content['fromsrc'] = url
        return main_content

    def _crawl_level(self, urls, next_urls):
        """Process every URL in ``urls``, collecting newly discovered
        links into ``next_urls`` (None at the deepest level), and store
        each successfully extracted item."""
        for _url in urls:
            main_content = self.process_url(_url, next_urls)
            # BUG FIX: only persist pages that actually yielded content;
            # the previous code passed None through to add_item.
            if main_content:
                self.item.add_item(main_content)

    def auto_crawl(self):
        """Run the whole crawl: fetch the start page, then walk its
        links with a fixed crawl depth of 3."""
        web_content = self.crawl.fetch(self.start_url)
        if not web_content:
            LOG.error("crawl first link fail")
            return
        # depth-1 links come straight from the start page
        self.ROOT_URLS = self.extract.get_all_links(web_content)
        self._crawl_level(self.ROOT_URLS, self.SECOND_URLS)
        self._crawl_level(self.SECOND_URLS, self.THIRD_URLS)
        self._crawl_level(self.THIRD_URLS, None)

    def start(self):
        """Public entry point: crawl everything reachable (depth 3) from
        ``self.start_url``."""
        LOG.info('start autoscrapy url %s', self.start_url)
        # main
        self.auto_crawl()
        LOG.info('end autoscrap url %s', self.start_url)


def get_crawl_urls(limit=1):
    """Ask the autoscrapy server for up to ``limit`` seed URLs.

    Returns a list of URL strings; returns an empty list when the HTTP
    request fails or the server answers with a non-200 status.
    """
    urls = []
    # BUG FIX: build the URL before the try block.  The previous code
    # built it inside the try, so a failure on config.server_url made
    # the except handler reference an unbound request_url (NameError
    # masking the original error).
    request_url = config.server_url + 'links?limit=%d' % limit
    try:
        # Keep the try body minimal: only the network call can
        # reasonably raise here.
        resp = requests.get(request_url)
    except Exception as e:
        LOG.exception("Get server %s fail of exception %s", request_url, e)
        return urls

    if resp.status_code == 200:
        # Only entries of type 'url' carry a usable seed URL.
        for link in resp.json()['links']:
            if link['type'] == 'url':
                urls.append(link['seed'])
    else:
        LOG.error("Request url %s return code %d", request_url, resp.status_code)
    LOG.info("Requst return urls %s", urls)
    return urls


##
# main
##
def main():
    """Fetch seed URLs from the server and run one crawl per URL."""
    for url in get_crawl_urls():
        autoscrapy(url).start()


# BUG FIX: guard the entry point so merely importing this module no
# longer kicks off a full crawl as a side effect.
if __name__ == "__main__":
    main()
