import re

import scrapy
from scrapy.http import Request

from ..items import HunterItem


class GbltestSpider(scrapy.Spider):
    """Spider for ylie51.com forum boards.

    Crawls one listing page at a time, follows every thread link found on
    it, and extracts author / post time / title / content plus image URLs
    from each thread into a :class:`HunterItem`.

    Pagination is derived from the page number embedded in the listing URL
    (``forum-<board>-<page>.html``) instead of the original hard-coded
    string slice ``response.url[0:31]`` + ``page_num`` counter, which only
    worked for one specific board URL and jumped from page 1 straight to
    page 25, skipping pages 2-24.
    """

    name = 'gbltest'
    # Kept for backward compatibility with external code that may read it;
    # pagination no longer depends on this counter.
    page_num = 25
    # Other boards crawled in earlier runs: forum-52, forum-54, forum-53,
    # forum-57, forum-62, forum-45, forum-58.
    start_urls = ["http://www.ylie51.com/forum-51-1.html"]

    def start_requests(self):
        """Yield the initial listing-page requests, bypassing the dupe filter."""
        for url in self.start_urls:
            yield Request(url, dont_filter=True)

    @staticmethod
    def _next_page_url(url):
        """Return the listing URL one page after *url*, or ``None``.

        Works for any board id and any page number, e.g.
        ``.../forum-51-1.html`` -> ``.../forum-51-2.html``.
        """
        m = re.match(r'^(.*/forum-\d+-)(\d+)\.html$', url)
        if m is None:
            return None
        return "%s%d.html" % (m.group(1), int(m.group(2)) + 1)

    def parse(self, response):
        """Collect all thread links on a listing page, then follow the next page."""
        # Thread subject links on the listing page (class 's xst').
        for href in response.xpath("//tbody/tr/th/a[@class='s xst']/@href").extract():
            yield Request(url="http://www.ylie51.com/" + href,
                          callback=self.process_one_page)

        # Advance to the next listing page, derived from the current URL
        # rather than a shared mutable counter.
        next_page = self._next_page_url(response.url)
        if next_page is not None:
            yield Request(url=next_page, callback=self.parse)

    def process_one_page(self, response):
        """Extract one thread page into a HunterItem.

        As in the original, an item is only yielded when the thread
        contains at least one image.  Missing fields become ``None``
        instead of raising IndexError on malformed pages.
        """
        item = HunterItem()
        item["url"] = response.url
        # The xpath matches every poster in the thread; the first match is
        # the original author.
        item["author"] = response.xpath(
            "//div[@class='authi']/a/text()").extract_first()
        # Strip the 4-character prefix (e.g. "发表于 ") from the post time.
        post_time = response.xpath(
            "//div[@class='authi']/em/text()").extract_first()
        item["time"] = post_time[4:] if post_time else None
        item["title"] = response.xpath(
            "//*[@id='thread_subject']/text()").extract_first()
        # All post bodies in the thread; index 1 is the opening post's
        # content (index 0 is presumably boilerplate — kept from original).
        contents = response.xpath("//td[@class='t_f']/text()").extract()
        item["content"] = contents[1] if len(contents) > 1 else None

        imgs = response.xpath(
            "//img[@class='guestviewthumb_cur']/@makefile").extract()
        if imgs:
            item["img_urls"] = ','.join(
                "http://www.ylie51.com/" + img_url for img_url in imgs)
            yield item
