# -*- coding: utf-8 -*-

import re
import json
import scrapy


class MySpider(scrapy.Spider):
    """Spider for http://www.mvyxws.com/.

    Crawl order:
      1. ``parse``         — home page: disease categories and their links.
      2. ``parse_detail``  — one category page: links to individual entries.
      3. ``deep_parse``    — one entry page: scrape the Tencent-cloud
                             appID/fileID and build the play-info API URL.
      4. ``deeper_parse``  — play-info JSON: final video title and URL.
    """
    name = 'yxws'

    def start_requests(self):
        self.start_urls = ['http://www.mvyxws.com/']
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse the home page.

        target url: http://www.mvyxws.com/

        Each ``div.category`` node is one top-level category; each ``<a>``
        inside it is one sub-category link. A *fresh* item dict is built per
        link — mutating one shared dict and passing it by reference through
        ``meta`` would make every concurrent callback see only the last
        value written (the original code had exactly that bug).
        """
        for node in response.xpath('//div[@class="category"]'):
            # Top-level category name, shared by all links in this node.
            category_name = node.xpath('./h2/text()').extract_first('')
            for link in node.xpath('./p/a'):
                href = link.xpath('./@href').extract_first('')
                item = {
                    'name': category_name,
                    'category_link': response.urljoin(href),
                }
                yield scrapy.Request(
                    url=item['category_link'],
                    callback=self.parse_detail,
                    dont_filter=True,
                    meta={'itemData': item, 'url': item['category_link']},
                )

    def parse_detail(self, response):
        """Parse a category page: one ``<li>`` per disease/entry link.

        Copies the category item per entry so parallel ``deep_parse``
        callbacks each own their dict instead of sharing one reference.
        """
        base = response.meta.get('itemData', {})
        nodelist = response.xpath(
            '//div[@class="jb-list-box"]/ul[@class="jb-list"]/li')
        for node in nodelist:
            item = dict(base)  # per-request copy — see docstring
            href = node.xpath('./a/@href').extract_first('')
            item['detail_link'] = response.urljoin(href)
            yield scrapy.Request(
                url=item['detail_link'],
                callback=self.deep_parse,
                meta={'item': item},
            )

    def deep_parse(self, response):
        """Parse an entry page.

        The video is hosted on Tencent cloud; the page embeds an ``appID``
        and ``fileID`` in inline script, from which the play-info API URL
        is assembled.
        """
        item = response.meta.get('item', {})
        item['category'] = response.xpath(
            '//div[@class="location"]/a[2]/text()').extract_first('')
        section = response.xpath('//section[1]').extract_first('')
        # NOTE: the double space after "fileID:" matches the page markup —
        # do not "fix" the pattern.
        appid = ''.join(re.findall(r"appID: '(.*?)'", section))
        fileid = ''.join(re.findall(r"fileID:  '(.*?)'", section))
        url = ('http://playvideo.qcloud.com/getplayinfo/v2/'
               + str(appid) + '/' + str(fileid))
        yield scrapy.Request(
            url=url,
            callback=self.deeper_parse,
            meta={'item': item},
        )

    def deeper_parse(self, response):
        """Parse the play-info JSON: extract the video title and the URL
        of the first transcoded rendition, then emit the finished item."""
        item = response.meta.get('item', {})
        payload = json.loads(response.text)
        item['video_title'] = payload['videoInfo']['basicInfo']['name']
        item['video_link'] = payload['videoInfo']['transcodeList'][0]['url']
        yield item