# -*- coding: utf-8 -*-
import scrapy
import os
import urllib.request
from douban.items import DoubanItem


class DoubanSpiderSpider(scrapy.Spider):
    """Crawl the Douban movie Top 250 list.

    For each movie on a listing page, build a ``DoubanItem`` and download its
    poster image into a rank-based sub-folder (``1-100`` / ``101-200`` /
    ``201-300``), then follow the "next page" link until the list is exhausted.
    """

    name = 'douban_spider'
    allowed_domains = ['movie.douban.com']
    start_urls = ['https://movie.douban.com/top250']

    # Root directory that holds the rank-based image sub-folders.
    # Single source of truth shared by parse() and isFileMkdir().
    IMAGE_ROOT = "D:\\project\\scrapy\\douban\\douban\\images\\"

    def parse(self, response):
        """Parse one Top-250 listing page.

        Extracts rank, name, description, rating and poster URL for each
        movie, downloads the poster into the folder matching its rank, and
        yields a follow-up Request for the next page when one exists.
        """
        movie_list = response.xpath("//div[@class='article']//ol[@class='grid_view']/li")
        path = self.IMAGE_ROOT
        for i_item in movie_list:
            douban_item = DoubanItem()
            douban_item['serial_number'] = i_item.xpath(".//div[@class='item']//em/text()").extract_first()
            douban_item['movie_name'] = i_item.xpath(".//div[@class='info']//a/span[1]/text()").extract_first()
            # The movie name doubles as the image file name.
            name = douban_item['movie_name']
            # The rank decides which sub-folder the poster is stored in.
            number = int(douban_item['serial_number'])
            # The description arrives as several text nodes; strip each
            # fragment and join them into one string (avoids shadowing the
            # builtin `str` that the original accumulator used).
            desc = i_item.xpath(".//div[@class='bd']//p[1]/text()").extract()
            douban_item['introduce'] = ''.join(part.strip() for part in desc).strip()
            douban_item['star'] = i_item.xpath(".//span[@class='rating_num']/text()").extract_first()
            douban_item['evaluate'] = i_item.xpath(".//div[@class='star']//span[4]/text()").extract_first()
            douban_item['describle'] = i_item.xpath(".//p[@class='quote']//span/text()").extract_first()
            douban_item['image'] = i_item.xpath(".//div[@class='pic']//a//img/@src").extract_first()

            if douban_item['image']:
                file_name = "%s.jpg" % name  # poster file named after the movie
                # BUG FIX: the original tests (`number < 100`, then
                # `100 < number < 200`) misfiled rank 100 and rank 200 into
                # the `201-300` folder. Inclusive upper bounds fix both.
                if number <= 100:
                    folder = '1-100'
                elif number <= 200:
                    folder = '101-200'
                else:
                    folder = '201-300'
                # Create the sub-folder on first use.
                if not self.isFileMkdir(folder):
                    os.mkdir(path + folder)
                file_path = os.path.join(path + folder, file_name)
                urllib.request.urlretrieve(douban_item['image'], file_path)
                print(file_name)
            # yield douban_item  # NOTE(review): items are never emitted to the
            # pipeline; uncomment to make the spider actually yield items.
        # Follow the "next page" link, if present (douban exposes it as a
        # <link> element inside span.next).
        next_link = response.xpath(".//span[@class='next']/link/@href").extract()
        if next_link:
            yield scrapy.Request("https://movie.douban.com/top250" + next_link[0], callback=self.parse)

    def isFileMkdir(self, name):
        """Return True if the image sub-folder *name* already exists."""
        return os.path.exists(os.path.join(self.IMAGE_ROOT, name))
