# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request, spider

from moviedatas.items import MoviedatasItem

# Scrape the Maoyan movie Top-100 leaderboard
class DatasSpider(scrapy.Spider):
    """Crawl the Maoyan Top-100 board (https://maoyan.com/board/4).

    Yields one ``MoviedatasItem`` per ranked movie and follows the
    "下一页" (next page) link until the board is exhausted.
    """

    name = 'datas'
    allowed_domains = ['maoyan.com']
    start_urls = ['https://maoyan.com/board/4']
    # Site root; movie detail hrefs on the board are relative to it.
    base_url = 'https://maoyan.com'
    # Board URL used as the base for next-page addresses.
    # (Original wrapped it in a one-element list and indexed [0] — pointless.)
    base_url_next = 'https://maoyan.com/board/4'

    def parse(self, response):
        """Parse one board page.

        Yields:
            MoviedatasItem: one item per ``<dd>`` movie entry on the page.
            Request: a follow-up request for the next page, when one exists.
        """
        # Each <dd> under the board's <dl> is one ranked movie entry.
        for entry in response.xpath('//*[@id="app"]/div/div/div[1]/dl/dd'):
            data = MoviedatasItem()

            # Movie title (@title attribute of the name link).
            # extract_first() returns None instead of raising IndexError
            # when the node is missing.
            data['filmname'] = entry.xpath(
                'div/div/div[1]/p[1]/a/@title').extract_first()

            # Detail-page URL as a single string. The original stored
            # extract()[:] — a one-element list — inconsistent with the
            # other string-valued fields.
            data['var_url'] = entry.xpath(
                'div/div/div[1]/p[1]/a/@href').extract_first()

            # Release date: the text reads "上映时间:<date>"; keep only the
            # part after the label. The original stored the whole split list.
            release_text = entry.xpath(
                'div/div/div[1]/p[3]/text()').extract_first(default='')
            data['filmdate'] = release_text.split(':', 1)[-1]

            # Starring actors, surrounding whitespace stripped.
            major_text = entry.xpath(
                'div/div/div[1]/p[2]/text()').extract_first(default='')
            data['major'] = major_text.strip()

            yield data

        # Find the "下一页" (next page) link to paginate through the board.
        next_url = response.xpath('.').re_first(r'href="(.*?)">下一页</a>')
        if next_url:
            # urljoin resolves the relative "?offset=N" fragment against the
            # current page URL — more robust than concatenating onto the
            # hard-coded base_url_next constant.
            yield Request(url=response.urljoin(next_url),
                          callback=self.parse,
                          dont_filter=True)
