# -*- coding: utf-8 -*-
import scrapy
import sys
#sys.path.append(r"/media/gnagge/程序/python/spiderman/Doubanmovie/Doubanmovie")
sys.path.append(r"/media/gangge/新加卷/winsoftware/python/train/spiderman/Doubanmovie/Doubanmovie")  #更改成你的items.py文件所在的文件夹目录
from items import DoubanmovieItem

# Spider that collects movie data from Douban Top 250 (main crawler program).
class MoviespiderSpider(scrapy.Spider):
    """Crawl the Douban Top 250 movie chart, yielding one item per movie.

    Follows the "next page" link until the last listing page is reached.
    """

    name = 'MovieSpider'    # spider name used by `scrapy crawl`
    allowed_domains = ['douban.com']    # keep the crawl on douban.com
    start_urls = ['https://movie.douban.com/top250']  # first listing page

    def parse(self, response):
        """Parse one listing page.

        Yields a populated DoubanmovieItem for every movie entry on the
        page, then schedules a request for the next page when one exists.
        """
        # Item field -> XPath, each evaluated relative to one
        # <div class="item"> (a single ranked movie on the page).
        field_paths = {
            'Mov_Rank': 'div[@class = "pic"]/em/text()',                   # chart rank
            'Mov_Name': 'div[@class = "info"]/div[@class = "hd"]/a/span[@class = "title"][1]/text()',  # title
            'Mov_abstract': 'div[@class = "info"]/div[@class = "bd"]/p/text()',  # director / cast blurb
            'Mov_pic': 'div[@class = "pic"]/a/img/@src',                   # poster image URL
            'Mov_grade': 'div[@class = "info"]/div[@class = "bd"]/div[@class = "star"]/span[@class = "rating_num"][1]/text()',  # rating
            'Mov_discribe': 'div[@class = "info"]/div[@class = "bd"]/p[@class = "quote"]/span/text()',  # one-line quote
        }

        for entry in response.xpath('//div[@class= "item"]'):
            movie = DoubanmovieItem()
            for field, path in field_paths.items():
                # .extract() deliberately keeps the original behavior of
                # storing a LIST of matched strings in each field.
                movie[field] = entry.xpath(path).extract()
            yield movie

        # Automatic pagination: follow the "next" link back into parse().
        next_link = response.xpath('//span[@class = "next"]/a/@href')
        if next_link:
            # The href is relative; resolve it against the current page URL.
            yield scrapy.Request(response.urljoin(next_link[0].extract()), self.parse)


