#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time: 2019/6/2  18:30
# @Author: 余浪人
# @email: yulangren520@gmail.com

from apps.spiders.crawler.spider_detail import re_analysis_detail, xpath_analysis_detail
from apps.spiders.crawler.spider_list import xpath_analysis_list, re_analysis_list
from apps.spiders.crawler import queue



def collector_list(obj):
    """Build the list-page URLs for *obj* and parse each with its rule parser.

    URLs come either from a numeric-range template (``{$ID}`` placeholder in
    ``obj.URL_create``, range bounds in ``obj.create_range`` as ``"start|end"``)
    or from the fixed URLs listed one-per-line in ``obj.fixed_URL``.  Each page
    is then handed to the XPath or regex list parser depending on
    ``obj.rule_cls``.
    """
    if obj.URL_create and obj.create_is_range:  # batch collection over an ID range
        # split once; original split the same string twice
        parts = obj.create_range.split('|')
        start, end = int(parts[0]), int(parts[-1])
        if obj.url_is_desc:  # collect in descending ID order
            ids = range(end, start - 1, -1)
        else:  # collect in ascending ID order
            ids = range(start, end + 1)
        url = [obj.URL_create.replace('{$ID}', str(i)) for i in ids]
    else:
        # explicit URL list, one per line (identity comprehension removed)
        url = obj.fixed_URL.splitlines()
    # rule_cls == 1 selects XPath rules, anything else selects regex rules;
    # single loop replaces the two duplicated dispatch loops
    parser = xpath_analysis_list if obj.rule_cls == 1 else re_analysis_list
    for page in url:
        parser(page, obj)


def collector_detail(data):
    """Crawl one detail page.

    *data* is a dict carrying the page URL under ``'detail_url'`` and the
    owning rule object under ``'obj'``; the object's ``rule_cls`` selects
    which detail parser handles the page (1 = XPath, otherwise regex).
    """
    url, obj = data.get('detail_url'), data.get('obj')
    if obj.rule_cls != 1:  # regex-based rules
        print('开始抓取链接:',url)
        re_analysis_detail(url=url, q_obj=obj)
    else:  # XPath-based rules
        xpath_analysis_detail(url=url, q_obj=obj)


def spider_start(obj):
    """Entry point: crawl the list pages for *obj*, then drain the detail queue.

    ``collector_list`` parses the list pages and (via the parsers) pushes
    detail-page descriptors onto the shared ``queue``; this loop pops and
    crawls them until the queue is exhausted.
    """
    collector_list(obj)  # parse list pages, extracting detail links into the queue
    while True:
        try:
            data = queue.get_nowait()
        # Original used a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit; Exception is the safe upper bound
        # (queue.Empty is the expected case — TODO confirm queue type).
        except Exception:
            print('url列表为空!')
            break
        if not data:  # original treated a falsy item ('') as end-of-work
            break
        collector_detail(data)
    return




# from apscheduler.schedulers.blocking import BlockingScheduler
# sched = BlockingScheduler()  # create the scheduler
# sched.add_job(detail_spider, 'interval', seconds=5,id='detail_spider')  # run once every 5 seconds
# sched.start()
