# -*- coding: utf-8 -*-
import scrapy


class TupianSpider(scrapy.Spider):
    """Spider that harvests image URLs from tu.heiguang.com gallery listings.

    Every image URL found on a listing page is made absolute and appended,
    one per line, to ``src.text`` in the working directory.
    """

    name = 'tupian'
    allowed_domains = ['tu.heiguang.com']
    start_urls = ['https://tu.heiguang.com/works?page=1&per-page=27']

    def parse(self, response):
        """Extract image URLs from one listing page and queue the rest.

        Parameters
        ----------
        response : scrapy.http.Response
            A listing page of the works gallery.

        Yields
        ------
        scrapy.Request
            Requests for pages 2..1000, scheduled from the first page only.
        """
        # The trailing spaces inside the class names are intentional: they
        # match the site's markup exactly.
        src_list = response.xpath(
            '//div[@class="width-center tu-listWorks Photos-QH Ajax-b "]'
            '/ul[@class="Works "]/li/a/img/@data-original'
        ).extract()

        # Open the output file once per page (not once per URL) and close it
        # deterministically via the context manager — the original leaked one
        # file handle per extracted URL.
        with open("src.text", "a", newline=None, encoding="utf-8") as fo:
            for src in src_list:
                url = response.urljoin(src)
                self.logger.info(url)  # spider logger instead of bare print
                fo.write(url + "\n")

        # Schedule pagination only from the start page. The original yielded
        # all 999 follow-up requests from EVERY callback, producing ~999*999
        # duplicate requests that only the dupefilter kept in check.
        if response.url == self.start_urls[0]:
            for page in range(2, 1001):
                href = "https://tu.heiguang.com/works?page=" + str(page) + "&per-page=27"
                yield scrapy.Request(url=href, callback=self.parse)





