from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapy.http import Request, FormRequest
from dirbot.items import Website
import scrapy
import re
class DmozSpider(Spider):
    name = "dmoz"
    # allowed_domains = ["dmoz.org"]
    # start_urls = [
    #     "http://baozouribao.com/documents",
    # ]

    def start_requests(self):
        for i in range(1,11):#这个地方是月份，
            for k in range(1,32):#这个地方是日期
                if i<10:
                    url1="http://paper.people.com.cn/rmrb/html/2017-0"+str(i)
                else :
                    url1="http://paper.people.com.cn/rmrb/html/2017-"+str(i)
                if k<10:
                    url=url1+"/0"+str(k)+"/nbs.D110000renmrb_01.htm"
                else :
                    url=url1+"/"+str(k)+"/nbs.D110000renmrb_01.htm"
                yield(Request(url,headers=None,callback=self.parse))
    def parse(self,response):
        site=response.xpath("//map[@name='PagePicMap']/area")
        url=response.url
        url=re.match('(http://paper.people.com.cn/rmrb/html/201[0-9]-[0-9][0-9]/[0-9][0-9]/)',url)
        url=(url.group(1))
        for i in site:
            ready_url=url+(i.xpath("@href").extract()[0]) 
            yield (Request(ready_url,callback=self.my_parse))
        next_pages=response.xpath("//div[@class='ban_t']/div[1]/ul[1]/li[1]/span[1]/a")
        for i in next_pages:
            next_page=i.xpath('text()').extract()[0]
            if (next_page=="下一版 "):
                next_page=i.xpath("@href").extract()[0]
                break
        if len(next_page)==0:
            pass
        else :
            next_page=url+next_page
            yield Request(next_page,callback=self.parse)
    def my_parse(self,response):
        item=Website()
        title=response.xpath("//div[@class='text_c']/h1/text()").extract()[0]
        item['title']=title
        contents=""
        content=response.xpath("//div[@id='ozoom']/p")
        for i in content:
            #print(i)
            contents+=i.xpath("text()").extract()[0]
        item['content']=contents
        yield item
        
        
 
    # def parse(self, response):
    #     """
    #     The lines below is a spider contract. For more info see:
    #     http://doc.scrapy.org/en/latest/topics/contracts.html

    #     @url http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/
    #     @scrapes name
    #     """
    #     sites = response.css('#site-list-content > div.site-item > div.title-and-desc')
    #     items = []

    #     for site in sites:
    #         item = Website()
    #         item['name'] = site.css(
    #             'a > div.site-title::text').extract_first().strip()
    #         item['url'] = site.xpath(
    #             'a/@href').extract_first().strip()
    #         item['description'] = site.css(
    #             'div.site-descr::text').extract_first().strip()
    #         items.append(item)
    #     return items
