import scrapy

from scrapy_demo.items import MovieItem


class MaoyanSpider(scrapy.Spider):
  """Scrape movie titles and scores from maoyan.com's film-list page.

  Yields one ``{'name': ..., 'score': ...}`` dict per movie; Scrapy
  forwards each yielded dict to the configured item pipelines.
  """

  name = 'maoyan'                       # required: unique spider identifier
  allowed_domains = ['maoyan.com']
  start_urls = ['https://maoyan.com/films'] # required: entry-point URL(s)

  # Session cookies attached to every initial request; without them the
  # site serves an anti-bot verification page instead of the film list.
  # NOTE(review): these values are session-bound and will expire — refresh
  # them (or move cookie handling into a middleware/login flow) when the
  # spider starts receiving verification pages.
  COOKIES = {
    'uuid_n_v': 'v1',
    'uuid': '974C9EC0F28311EA827D23E3CB2A4E391F50D14CB7824E0EBB304C38D7A8BE53',
    '_csrf': '8566a3ecb7a17e3ec3d75c52d69ff5f5b43319bab5fc5642f125ecb763450994',
    'Hm_lvt_703e94591e87be68cc8da0da7cbd0be2': '1599645759',
    '_lxsdk_cuid': '17472513858c8-08216457689276-15336251-384000-17472513858c8',
    '_lxsdk': '974C9EC0F28311EA827D23E3CB2A4E391F50D14CB7824E0EBB304C38D7A8BE53',
    'mojo-uuid': '82f4899f88ada63daf73f9e6218a1e2b',
    'mojo-session-id': '{"id":"c90f5c5eb8433acceaae46b5df215521","time":1599645760734}',
    'Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2': '1599646819',
    '__mta': '143563402.1599645760434.1599646817582.1599646819411.8',
    'mojo-trace-id': '11',
    '_lxsdk_s': '1747251385d-f3d-eb3-664%7C%7C18',
  }

  def start_requests(self):
    """Override the default request generation to attach session cookies.

    Issues one request per ``start_urls`` entry, routed to :meth:`parse`.
    """
    for url in self.start_urls:
      self.logger.debug('requesting %s', url)
      yield scrapy.Request(url=url, cookies=self.COOKIES, callback=self.parse)

  def parse(self, response):
    """Extract (title, score) pairs from the film-list page.

    Yields a plain dict per movie; a ``MovieItem`` with the same fields
    would be accepted by the pipelines as well.
    """
    self.logger.info('parsing %s', response.url)
    names = response.xpath('//div[@class="channel-detail movie-item-title"]/@title').extract()
    # A score is rendered across several child nodes (integer part plus
    # fraction), so take the whole string value of each div rather than a
    # single text() node.
    score_divs = response.xpath('//div[@class="channel-detail channel-detail-orange"]')
    scores = [div.xpath('string(.)').extract_first() for div in score_divs]
    for movie_name, movie_score in zip(names, scores):
      yield {'name': movie_name, 'score': movie_score}
