# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from videoproject.items import VideoprojectItem

class Videoproject0126Spider(scrapy.Spider):
    """Crawl TV-series listing pages on bayiyy.com and yield one item per show.

    Starts from page 1 of the listing and schedules pages 2-9, whose URLs
    follow the pattern ``list_0_0_0_0_3_<page>.html``.
    """
    name = 'videoproject0126'
    allowed_domains = ['www.bayiyy.com']
    start_urls = ['http://www.bayiyy.com/dianshiju/list_0_0_0_0_3_1.html']

    # Base URL for the paginated listing; the page number and ".html" are appended.
    PAGE_URL = 'http://www.bayiyy.com/dianshiju/list_0_0_0_0_3_'

    def parse(self, response):
        """Parse one listing page.

        Yields:
            VideoprojectItem: one per show on the page, with ``videoname``,
                ``videourl`` and ``videoimage`` fields.
            Request: follow-up requests for listing pages 2-9 (Scrapy's
                duplicate filter drops repeats across pages).
        """
        # Iterate over the <li> entries, one per show. The original code
        # iterated over the single <ul> container and reused one item object,
        # so every yield carried page-wide lists in a shared, mutated item.
        shows = response.xpath(
            '//ul[@class="v_picTxt v_pic_187_249 v_limit_width clearfix"]/li')
        for show in shows:
            video = VideoprojectItem()  # fresh item per show — never reuse
            # Show title from the poster link's title attribute.
            video['videoname'] = show.xpath(
                './div[@class="v_pic"]/a/@title').extract_first()
            # Detail-page link.
            video['videourl'] = show.xpath(
                './div[@class="v_pic"]/a/@href').extract_first()
            # Lazy-loaded poster image URL.
            # NOTE(review): <img> assumed to be a direct child of the div, as
            # in the original XPath — confirm it is not nested under <a>.
            video['videoimage'] = show.xpath(
                './div[@class="v_pic"]/img/@data-original').extract_first()
            yield video

        # Schedule the remaining listing pages once per parsed page (moved out
        # of the per-item loop, which re-yielded these for every show).
        for page in range(2, 10):
            yield Request(self.PAGE_URL + str(page) + '.html',
                          callback=self.parse)
