# -*- coding:utf-8 -*-
import os
import re
import urllib
import urllib.request

import scrapy
from scrapy.http import HtmlResponse, Request
from scrapy.selector import Selector


class IndexSpider(scrapy.spiders.Spider):
    """Spider that extracts <video src> URLs from a page and downloads the videos locally."""

    name = "vxinhge_video"  # spider name, used by `scrapy crawl vxinhge_video`
    # allowed_domains = ["vxinghe.com"]  # domain restriction for the crawl (currently disabled)
    start_urls = ["http://video.sina.cn/news/s/2018-01-10/detail-ifyqnick0953723.d.html?vt=4&pos=91"]

    # Local directory where downloaded videos are saved.
    DOWNLOAD_DIR = "/Users/lax/Desktop/video/"

    def parse(self, response):
        """Find every <video> inside the player section and download its src.

        :param response: the Scrapy HtmlResponse for one of ``start_urls``.
        """
        basePath = '//section[@class="aplayer"]'
        # BUGFIX: extract() must be *called* — the original printed the bound method object.
        print('--------------->>>>>>>>>>>>>>', response.xpath(basePath).extract())
        srcPath = './/video/@src'
        for hxs in response.xpath(basePath):
            src = hxs.xpath(srcPath).extract()  # list of src attribute values (may be empty)
            print('--------------->>>>>>>>>>>>>>', src)
            if not src:
                continue
            # BUGFIX: resolve a possibly-relative URL against the page URL
            # instead of prefixing a hard-coded (and misspelled) domain,
            # and actually use the result for the download below.
            absolute_src = response.urljoin(src[0])
            # BUGFIX: the original joined the whole list `src` into the path
            # (TypeError). Use the URL's basename as the local file name.
            # NOTE(review): basename may still carry a query string — acceptable
            # here, but confirm against the actual video URLs.
            file_path = os.path.join(self.DOWNLOAD_DIR, os.path.basename(src[0]))
            os.makedirs(self.DOWNLOAD_DIR, exist_ok=True)
            # BUGFIX: Python 3 moved urlretrieve into urllib.request; also pass
            # the resolved URL string rather than the raw list.
            urllib.request.urlretrieve(absolute_src, file_path)
