# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import EastdayItem
from os import path
import os

'''
Video file URL example:
https://shortmvpc.eastday.com/vvideo/20191101/20191101223842127373909_1.mp4

Video play-page URL example:
https://video.eastday.com/a/200111145116682411804.html

XPath for the video file URL:
//input[@id="mp4Source"]/@value
XPath for the video title:
//meta[@name="description"]/@content
'''


class EastdaySpider(CrawlSpider):
    """Crawl video.eastday.com: follow video play pages, extract each page's
    mp4 URL and title, download the video body and store it under
    ./video_download/, yielding an EastdayItem per video."""

    name = 'eastday'
    allowed_domains = ['eastday.com']
    start_urls = ['https://video.eastday.com']

    rules = (
        # Play pages look like https://video.eastday.com/a/<digits>.html.
        # Dots are escaped so '.' cannot match arbitrary characters.
        Rule(LinkExtractor(allow=r'https://video\.eastday\.com/a/\d+\.html'),
             callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Parse one video play page and schedule the mp4 download.

        Skips (with a warning) pages that lack the expected markup instead of
        letting 'https:' + None raise a TypeError swallowed by a broad except.
        """
        video_src = response.xpath('//input[@id="mp4Source"]/@value').extract_first()
        title = response.xpath('//meta[@name="description"]/@content').extract_first()
        if not video_src or not title:
            self.logger.warning('missing video url/title on %s', response.url)
            return

        item = EastdayItem()
        item['video_url'] = 'https:' + video_src
        item['video_title'] = title

        # Carry the item under an explicit meta key so parse_video does not
        # pick up Scrapy's internal meta entries (depth, download_slot, ...).
        yield scrapy.Request(url=item['video_url'],
                             meta={'item': item},
                             callback=self.parse_video,
                             priority=1000)

    def parse_video(self, response):
        """Write the downloaded mp4 body to disk and yield the finished item."""
        item = response.meta['item']
        file_name = item['video_title'] + '.mp4'
        base_dir = path.join(path.curdir, 'video_download')
        item['video_local_path'] = path.join(base_dir, file_name)
        # makedirs(exist_ok=True) creates missing parents and is race-free,
        # unlike the exists()-then-mkdir check it replaces.
        os.makedirs(base_dir, exist_ok=True)
        with open(item['video_local_path'], 'wb') as f:
            f.write(response.body)
        yield item
