import scrapy
from ..items import QiubaiJokeItem
import os
class QiubaiSpider(scrapy.Spider):
    """Scrape author, joke text, and vote count from qiushibaike.com text pages.

    Starts at page 18 and follows at most one additional page (controlled
    by the `page` counter, which only advances while `page <= 1`).
    """
    name = 'qiubai'
    allowed_domains = ['www.qiushibaike.com']
    start_urls = ['https://www.qiushibaike.com/text/page/18/']
    page = 1  # pagination counter; parse() increments it once

    def parse(self, response):
        """Yield one QiubaiJokeItem per joke on the page, then follow the next page.

        Parameters
        ----------
        response : scrapy.http.Response
            The downloaded joke-listing page.

        Yields
        ------
        QiubaiJokeItem
            One item per joke with `author`, `context`, and `stars` fields.
        scrapy.Request
            At most one follow-up request for the next page.
        """
        authors = response.xpath('//div[@class="author clearfix"]/a/h2/text()')
        contents = response.xpath('//div[@class="content"]/span')
        stars = response.xpath('//div[@class="stats"]/span/i/text()')
        # Bug fix: create a FRESH item per joke. The original instantiated a
        # single QiubaiJokeItem before the loop and mutated it each iteration,
        # so every yielded reference pointed at the same object holding the
        # last joke's values. zip() also avoids the IndexError the old
        # range(len(authors)) loop could raise on mismatched list lengths.
        for author, content, star in zip(authors, contents, stars):
            item = QiubaiJokeItem()
            item['author'] = author.get()
            item['context'] = content.get()
            item['stars'] = star.get()
            yield item

        if self.page <= 1:
            self.page += 1
            url = 'https://www.qiushibaike.com/text/page/{}/'.format(self.page)
            # Bug fix: the proxy meta value previously contained literal
            # double-quote characters ('"http://..."'), which is not a valid
            # proxy URL for Scrapy's HttpProxyMiddleware.
            yield scrapy.Request(
                url=url,
                callback=self.parse,
                meta={'proxy': 'http://116.149.202.255:4226'},
            )