# -*- coding: utf-8 -*-

__author__ = 'lee'

import scrapy
import sys
import urlparse
from scrapy import Selector
from scrapy import Request
from Umei.items import AlbumItem


# Python 2 hack: force the process-wide default string encoding to UTF-8 so
# implicit str<->unicode conversions of the scraped Chinese text don't raise
# UnicodeDecodeError. reload() is required because site.py removes
# sys.setdefaultencoding from the module at interpreter startup.
reload(sys)
sys.setdefaultencoding('utf-8')


class Yixiuba(scrapy.Spider):
    """Spider for image albums on www.yixiuba.com.

    Crawl flow:
      parse            -- home page: follow each category link in the header
      parse_page       -- category listing: follow album links + listing pagination
      parse_album      -- accumulate image URLs across an album's pages, then
                          yield a single AlbumItem when the last page is reached
      find_next_page   -- shared helper that locates the "next page" anchor
    """

    name = "yixiuba"
    allowed_domains = ["yixiuba.com"]
    start_urls = ["http://www.yixiuba.com"]

    def parse(self, response):
        """Parse the home page and follow every category link in the header."""
        links = Selector(response).xpath('//div[@class="header1"]//a[@href]')
        for link in links:
            url = link.xpath('./@href').extract()[0]
            # Hrefs may be site-relative; resolve against the current page URL.
            url = urlparse.urljoin(response.url, url)
            yield Request(url, callback=self.parse_page)

    def parse_page(self, response):
        """Parse one category listing page.

        Follows each album link on the page, then the listing's own
        "next page" link (if any) back into parse_page.
        """
        links = Selector(response).xpath('//div[@id="contant"]//a[@class="title"][@target="_blank"]')
        for link in links:
            url = link.xpath('./@href').extract()[0]
            url = urlparse.urljoin(response.url, url)
            yield Request(url, callback=self.parse_album)

        # Continue through the listing's pagination, if present.
        next_url = self.find_next_page(response)
        if next_url:
            yield Request(next_url, callback=self.parse_page)

    def parse_album(self, response):
        """Parse one page of an album.

        Image URLs are carried across the album's pages via
        ``response.meta['item']``; the AlbumItem is yielded only once the
        final page (no "next page" link) has been scraped.
        """
        selector = Selector(response)
        title = selector.xpath('//div[@class="Title"]/h1/text()').extract()[0]
        images = selector.xpath('//div[@class="page-list"]//img/@src').extract()
        next_url = self.find_next_page(response)

        # Prepend images collected on earlier pages of this album. The
        # original wrapped response.meta['item'] in a bare try/except that
        # swallowed every error; meta.get() expresses the same "first page
        # has no accumulator" case without hiding real failures.
        images = response.meta.get('item', []) + images

        if next_url:
            yield Request(next_url, callback=self.parse_album, meta={'item': images})
        else:
            album = AlbumItem()
            album['title'] = title
            album['url'] = response.url
            album['images'] = images
            yield album

    def find_next_page(self, response):
        """Return the absolute URL of this page's "next page" link, or None.

        Looks inside the pagination containers and matches the anchor whose
        text is exactly u"下一页" ("next page"), ignoring placeholder '#'
        hrefs used on the last page.
        """
        links = Selector(response).xpath('//div[@class="sxye" or @class="dede_pages"]//a')
        for link in links:
            texts = link.xpath('./text()').extract()
            # Guard against text-less anchors: the original indexed [0]
            # unconditionally and could raise IndexError.
            if texts and texts[0] == u"下一页":
                next_page = link.xpath('./@href').extract()[0]
                if next_page and next_page != '#':
                    return urlparse.urljoin(response.url, next_page)