# -*- coding: utf-8 -*-

import random

import scrapy
from scrapy import Spider
from scrapy.selector import Selector
from jdmz.items import JdmzItem

# Pool of desktop and mobile User-Agent strings for request-header rotation.
# NOTE(review): not referenced anywhere in this file — presumably consumed by
# a downloader middleware (e.g. random.choice(USER_AGENT) per request) or by
# project settings; confirm before removing.
USER_AGENT = [
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Mobile/12B466 MicroMessenger/6.1.5 NetType/3G+",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 8_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Mobile/12D508 MicroMessenger/6.1.5 NetType/WIFI",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0; QQBrowser/8.1.3886.400) like Gecko"
]


class JdmzSpider(Spider):
    """Crawl jandan.net 'ooxx' listing pages and yield one JdmzItem
    (image URL + author name) per comment entry found on each page.
    """

    name = 'mz'
    allowed_domains = ["jandan.net"]
    # Pages 1000..1405 inclusive -> 406 listing pages (the original
    # "# 3 pages" comment was stale).  `range` is used instead of the
    # Python-2-only `xrange`: identical behavior here, works on 2 and 3.
    start_urls = ["http://jandan.net/ooxx/page-%s" % page
                  for page in range(1000, 1406)]

    def parse(self, response):
        """Parse one listing page.

        Args:
            response: the Scrapy Response for a start_urls page.

        Yields:
            JdmzItem: one item per <li id="comment-NNN"> containing an image.
        """
        self.log('===> 开始处理找到网址：{0}'.format(response.url))

        # Each comment entry is an <li> whose id matches comment-<digits>.
        # Raw string so the \d in the embedded regex is explicit.
        for li in response.xpath(r'//li[re:test(@id, "comment-\d+$")]'):
            src = li.xpath('.//div[@class="text"]/p/img/@src').extract_first()
            if not src:
                # Skip entries with no image instead of crashing with the
                # IndexError that the original extract()[0] raised on an
                # empty selection.
                continue
            item = JdmzItem()
            item['datasrc'] = src
            # extract_first() yields None (rather than raising) when the
            # author element is missing.
            item['author'] = li.xpath(
                './/div[@class="author"]/strong/text()').extract_first()
            yield item
