# -*- coding: utf-8 -*-
import scrapy
from movie.items import MovieItem

class MeijuSpider(scrapy.Spider):
    """Scrape the "new 100" US-TV-series chart from meijutt.com.

    Attributes:
        name: Spider name; run with ``scrapy crawl meiju``. A project may
            hold many spiders, each with its own priority/concurrency settings.
        allowed_domains: Off-site guard — every request URL is checked against
            this list before being sent. NOTE(review): if crawl logs show a
            ``None`` response for every URL, verify this domain is correct.
        start_urls: Seed page listing the 100 most recently updated shows.
    """
    name = 'meiju'
    allowed_domains = ['meijutt.com']
    start_urls = ['https://www.meijutt.com/new100.html']

    def parse(self, response):
        """Yield one MovieItem per show row on the chart page.

        Args:
            response: Downloaded ``start_urls`` page.

        Yields:
            MovieItem with 'movie_name', 'state', 'category', 'tv' and
            'time' fields (any of which may be None if a cell is missing).
        """
        # Canonical long form would be Selector(response.text).xpath(...).extract()
        movie_list = response.xpath('//ul[@class="top-list  fn-clear"]/li')
        for movie in movie_list:
            movie_name = movie.xpath('./h5/a/text()').extract_first()
            # The state cell nests its text differently for the three cases
            # (no subtitles / subtitled / finished), so take the concatenated
            # text of all descendants via string(.) rather than one child node.
            states = movie.xpath('./span[@class="state1 new100state1"]')
            state = states.xpath('string(.)').extract_first()
            category = movie.xpath('./span[@class="mjjq"]/text()').extract_first()
            tv = movie.xpath('./span[@class="mjtv"]/text()').extract_first()
            # Highlighted (recently updated) rows carry the time in a <font>
            # tag; older rows keep it as plain text in the div — fall back.
            update_time = movie.xpath('./div/font/text()').extract_first()
            if not update_time:
                update_time = movie.xpath('./div[@class="lasted-time new100time fn-right"]/text()').extract_first()

            item = MovieItem()
            item['movie_name'] = movie_name
            item['state'] = state
            item['category'] = category
            item['tv'] = tv
            item['time'] = update_time
            yield item