import scrapy
from scrapy.http import Request
from scrapy.selector import Selector
from ..items import FirstspiderItem         # import items.py file


class JandanSpider(scrapy.Spider):
    """Crawl jandan.net index pages.

    Yields one ``FirstspiderItem`` (url, title) per article headline found
    under ``div.indexs``, then follows the "next page" link from the
    ``wp-pagenavi`` block until no further link exists.
    """

    name = 'jandan'
    allowed_domains = ['jandan.net']
    start_urls = ['http://jandan.net/']

    def start_requests(self):
        # Page counter; used only for progress logging in parse1.
        self.i = 1
        for url in self.start_urls:
            yield Request(url, dont_filter=True, callback=self.parse1)

    def parse1(self, response):
        """Parse one index page: emit items, then follow pagination.

        :param response: the downloaded index page.
        :yields: ``FirstspiderItem`` per headline, plus a ``Request`` for
                 the next page when one is found.
        """
        self.i += 1
        # Use the spider logger instead of bare print().
        self.logger.info("page %d: %s", self.i, response.url)

        # response.xpath() replaces the legacy Selector(response) wrapper.
        for tag in response.xpath("//div[@class='indexs']//h2"):
            url = tag.xpath('./a/@href').extract_first()
            title = tag.xpath('./a/text()').extract_first()
            if url:  # skip malformed entries with no link
                yield FirstspiderItem(url=url, title=title)

        # BUG FIX: the original called extract() (a *list*) and "".join()ed
        # it, which glued every matching href into one broken URL whenever
        # the fallback XPath matched several links — and when nothing
        # matched it requested "http://jandan.net" again, looping forever.
        # extract_first() takes exactly one href (or None), and we stop
        # cleanly when there is no next page.
        next_href = response.xpath(
            "//div[@class='wp-pagenavi']/a[2]/@href").extract_first()
        if not next_href:
            next_href = response.xpath(
                "//div[@class='wp-pagenavi']/a/@href").extract_first()
        if next_href:
            # urljoin handles relative, absolute and protocol-relative
            # hrefs correctly, unlike manual string concatenation.
            yield Request(response.urljoin(next_href), callback=self.parse1)

