import scrapy
import re
from scrapy import cmdline
from getShipVideo.items import GetshipvideoItem
from getShipVideo.pipelines import GetshipvideoPipeline
from scrapy.utils.project import get_project_settings
import os


class GetvideoSpider(scrapy.Spider):
    """Crawl one vcg.com creative-video search page, follow each result to
    its detail page, and download the video file via GetshipvideoPipeline.
    """

    name = "getVideo"
    allowed_domains = ["www.vcg.com"]
    url = 'https://www.vcg.com/creative-video-search/5578/?page=10'
    start_urls = [url]
    test = 0  # running index of listing entries seen; used only for console output

    def parse(self, response):
        """Parse the search-results page and schedule one detail request per video.

        Yields:
            scrapy.Request: one request per listing entry, carrying the item
            and cleaned title in ``meta`` for ``parse_detail``.
        """
        print("BEGIN PARSE!!!!!!!!!!!")
        lists = response.xpath('//article[@class="R3J0t"]/div[@class="_2tMKZ"]')
        for entry in lists:
            link = entry.xpath('./a/@href').get()
            desc = entry.xpath('./a/@title').get()
            if desc:
                # Collapse whitespace so the title can later be used as a filename.
                desc = desc.replace('\n', '').replace('\r', '').replace(' ', '')
            else:
                desc = '未找到标题'
            if not link:
                # No detail href for this entry; concatenating None would raise.
                continue

            # Create a fresh item PER entry. A single instance shared across
            # iterations (the original code) gets clobbered by later loop
            # passes before the concurrent detail callbacks consume it.
            item = GetshipvideoItem()
            item['video'] = link
            print(f"{self.test}. " + f"视频标题: {desc}, 视频链接: https://www.vcg.com{link}")

            headers = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0",
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                "Accept-Language": "en-US,en;q=0.5",
                "Connection": "keep-alive",
                "Referer": "https://www.vcg.com"
            }
            yield scrapy.Request(
                url="https://www.vcg.com" + item["video"],
                callback=self.parse_detail,
                meta={"item": item, "desc": desc},
                headers=headers
            )

            self.test += 1
        # NOTE: pagination is intentionally disabled. To re-enable, track a
        # page counter on the spider and yield a request for the next
        # results page here.

    def parse_detail(self, response):
        """Extract the video file URL from a detail page and download it.

        Yields:
            GetshipvideoItem: with ``video`` (file URL or None) and ``desc`` set.
        """
        item = response.meta["item"]
        # Player markup varies: try <video src> first, then fall back to <source src>.
        # //*[@id="videoContainer"]/video
        x = response.xpath('//video/@src').get()
        if not x:
            x = response.xpath('//source/@src').get()
        print(f"视频文件链接: {x}")
        item['video'] = x
        item['desc'] = response.meta['desc']

        settings = get_project_settings()
        store_uri = settings.get('FILES_STORE')

        desc = item['desc']
        # Keep only CJK characters for the filename; fall back to 'video' so a
        # title with no Chinese characters does not yield the hidden file ".mp4".
        pattern = re.compile(r'[^\u4e00-\u9fa5]')
        filename = (re.sub(pattern, '', desc) or 'video') + ".mp4"
        file_path = os.path.join(store_uri, filename)

        pipeline = GetshipvideoPipeline(store_uri)
        success = False
        if x:
            # Assumes the extracted src is protocol-relative ("//...") — TODO
            # confirm; an absolute URL would end up with a doubled scheme.
            pipeline.download_video(url="https:" + x, filename=file_path)
            success = True

        print(f"视频标题: {desc}, 视频下载链接: https:{x}, 视频是否下载成功: {'成功' if success else '失败'}\n")

        yield item


cmdline.execute('scrapy crawl getVideo'.split())