#!/usr/bin/python3
#-*- coding:utf-8 -*-

from scrapy import Spider
from time import sleep
from bot.items.dw import listItem,contentItem
from pyquery import PyQuery
from scrapy.http import Request

class dwSpider(Spider):
    """Scrapy spider for dewen.net.cn question listings.

    ``parse`` walks the paginated question list, yielding one ``listItem``
    per question row plus a ``Request`` to fetch that question's content
    page; ``parse_content`` builds a ``contentItem`` for each content page.
    Pipeline routing is done via the ``flag`` field on each item.
    """

    name = 'dw'
    allowed_domains = ["dewen.net.cn"]
    start_urls = ["http://www.dewen.net.cn/questions?page=1"]

    # Per-request politeness delay (seconds), honored by Scrapy's downloader.
    download_delay = 1

    domain = 'http://www.dewen.net.cn'

    # Progress counters shared across callbacks (class-level, single-spider use).
    curr_size = 1
    curr_page = 1

    custom_settings = {
        'ITEM_PIPELINES':{'bot.pipelines.dw.pipeline': 300},
    }

    def parse(self, response):
        """Parse one listing page: yield per-row items/requests, then follow
        the next-page link if one exists.
        """
        res = PyQuery(response.body)
        # BUGFIX: check the href for None BEFORE concatenating — on the last
        # page ``.attr('href')`` returns None and ``domain + None`` raised
        # TypeError before the old None-guard was ever reached.
        next_href = res('.page > .curpage').next('a').attr('href')
        page_url = dwSpider.domain + next_href if next_href is not None else None
        page_txt = res('.page > .curpage').next('a').text()
        dwSpider.curr_page = res('.page > .curpage').text()
        dwSpider.curr_size = 1
        print('爬虫 %s 解析信息完毕,本页 : %s ->>> 下一页 : %s ' % (dwSpider.name,dwSpider.curr_page,page_txt))
        for k,v in enumerate(res('#questions_area > div')):
            print('爬虫 %s 准备处理过滤第%s页 ->>> 第%d个数据...' % (dwSpider.name,dwSpider.curr_page,k+1))

            # BUGFIX: scope the selector to the current row ``v`` — the old
            # code queried the whole document, so every row yielded the
            # first question's link.
            link = PyQuery(v)('.question_listitle a').attr('href')
            if link is None:
                # Row without a question link (ads/separators) — skip it.
                continue
            if not link.startswith('http'):
                link = dwSpider.domain + link

            # BUGFIX: fresh item per row — the old single shared item was
            # mutated and re-yielded, so all yielded items aliased one object.
            item = listItem()
            item['flag'] = dwSpider.name + '_list'
            item['link'] = link

            yield Request(url=item['link'],callback=self.parse_content,dont_filter=True)
            yield item
        if page_url is not None:
            yield Request(url=page_url,callback=self.parse,dont_filter=True)

    def parse_content(self,response):
        """Parse one question content page and return its ``contentItem``."""
        print('爬虫 %s 正在爬取第%s页 ->>> 第%d个内容页面' %(dwSpider.name,dwSpider.curr_page,dwSpider.curr_size))

        res = PyQuery(response.body)
        item = contentItem()
        item['flag'] = dwSpider.name + '_content'

        dwSpider.curr_size += 1
        # BUGFIX: return the item — the old ``return 123456`` is rejected by
        # Scrapy ("Spider must return request, item, or None") and silently
        # discarded the contentItem that was just built.
        return item
