import scrapy
from scrapy.selector import Selector
import json

# Candidate scrape targets and the XPath that extracts image URLs on each:
#   https://www.dbmeinv.com/              -> //*[@class="height_min"]/@src
#   http://www.biread.net/forum-37-2.html -> //*[@class="z"]/img/@src
class MzSpider(scrapy.Spider):
    """Crawl biread.net forum listing pages and save the image URLs found
    on each page to a local JSON file named ``biread<page>.json``.
    """

    name = "meizi"

    # Browser-like request headers; the site appears to reject requests
    # without a realistic User-Agent/Referer (presumably — TODO confirm).
    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip,deflate",
        "Accept-Language": "en-US,en;q=0.8,zh-TW;q=0.6,zh;q=0.4",
        "Connection": "keep-alive",
        "Content-Type":" application/x-www-form-urlencoded; charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36",
        "Referer": "http://www.biread.net"
    }

    # Forum listing pages 1..99 for board 37.
    urls = ['http://www.biread.net/forum-37-{0}.html'.format(x) for x in range(1, 100)]

    def start_requests(self):
        """Yield one request per listing page, carrying the custom headers."""
        for url in self.urls:
            yield scrapy.Request(url=url, headers=self.headers, callback=self.parse)

    def parse(self, response):
        """Extract image ``src`` URLs from a listing page and dump them to
        ``biread<page>.json``, where ``<page>`` is the page number taken
        from the response URL.
        """
        image_urls = response.xpath('//*[@class="z"]/img/@src').extract()
        self.log(image_urls)

        # URL has the form .../forum-37-<page>.html; take the last
        # dash-separated token and strip the ".html" suffix.
        page = response.url.split('-')[-1].split('.')[0]

        filename = 'biread{0}.json'.format(page)

        # BUG FIX: json.dumps returns str, but the file was opened in 'wb'
        # (binary) mode, which raises TypeError on Python 3. Open in text
        # mode and stream the JSON straight to the file handle.
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(image_urls, f)
        self.log('Saved file {0}'.format(page))