import scrapy

# scrapy crawl ipproxytest --nolog
class IpproxytestSpider(scrapy.Spider):
    """Throwaway spider for checking whether a proxy IP works.

    Requests a single Eastmoney stock-forum page and dumps the raw
    response body to ``test.html`` so the result of the (possibly
    proxied) request can be inspected by hand.

    Run with: ``scrapy crawl ipproxytest --nolog``
    """

    name = "ipproxytest"
    # Additional candidate URLs for testing (httpbin.org/ip echoes the
    # requesting IP, handy for verifying the proxy is actually used).
    start_urls = ['https://guba.eastmoney.com/list,603400.html']

    # To route requests through a custom proxy, override start_requests
    # and attach the proxy via request meta, e.g.:
    #
    #     def start_requests(self):
    #         for url in self.start_urls:
    #             yield scrapy.Request(
    #                 url,
    #                 meta={'proxy': 'http://username:password@proxy_ip:proxy_port'},
    #             )

    def parse(self, response):
        """Write the raw response body to ``test.html`` for manual inspection."""
        body = response.body
        with open('test.html', 'wb') as outfile:
            outfile.write(body)
