# -*- coding: utf-8 -*-
import scrapy


class MmonlyGroupImageSpider(scrapy.Spider):
    """Crawl one mmonly.cc photo-gallery group.

    Yields one item per gallery page (description, big-image URL, update
    time) and follows the numbered in-group pagination links until the
    trailing '下一页' (next page) entry is reached.
    """

    name = 'mmonly_group_image'
    allowed_domains = ['www.mmonly.cc']
    start_urls = ['http://www.mmonly.cc/mmtp/xgmn/292641.html']

    def parse(self, response):
        """Extract the current gallery page and schedule the next one.

        Args:
            response: Scrapy response for one gallery page.

        Yields:
            dict with keys 'des' (description text, '' when absent),
            'image_src' (URL of the big image) and 'time' (update-time
            text, '' when the marker is missing), followed by a
            ``scrapy.Request`` for the next page when one exists.
        """
        item = {}
        # The description paragraph is missing on some pages.
        des = response.xpath('//div[@class="descriptionBox"]/p/text()').extract()
        item['des'] = des[0] if des else ''
        item['image_src'] = response.xpath(
            '//div[@id="big-pic"]/p/a/img/@src').extract_first()
        # The opts span reads like '... 更新时间：2018-01-01 ...'; keep only
        # the text after the '更新时间：' label. Guard against pages where
        # the marker is absent (the original .index() raised ValueError).
        opts_text = response.xpath(
            '//div[@class="photo-opts"]/span/text()').extract_first(default='')
        marker = '更新时间：'
        pos = opts_text.find(marker)
        item['time'] = opts_text[pos + len(marker):] if pos != -1 else ''
        yield item

        # Pagination: locate the <li class="thisclass"> (current page) and
        # follow the next <li>'s link, unless that entry is the trailing
        # '下一页' link, which marks the last numbered page of the group.
        elem_pages = response.xpath('//div[@class="pages"]/ul/li')
        for i, elem in enumerate(elem_pages):
            if elem.xpath('./@class').extract_first() != 'thisclass':
                continue
            if i + 1 >= len(elem_pages):
                continue
            nxt = elem_pages[i + 1]
            if nxt.xpath('./a/text()').extract_first() == '下一页':
                continue
            href = nxt.xpath('./a/@href').extract_first()
            if href:
                # urljoin resolves the relative href against the page we
                # are on, instead of string-rebuilding from start_urls[0].
                next_url = response.urljoin(href)
                self.logger.info('Following next page: %s', next_url)
                yield scrapy.Request(next_url, callback=self.parse)
