# -*- coding: utf-8 -*-
import scrapy
from Douban.items import DoubanItem
import time
from Douban.settings import ipPool
import random
import requests


class MovieSpider(scrapy.Spider):
    """Crawl Douban Movie Top 250, yielding one item (name, score) per film."""

    name = 'movie'
    allowed_domains = ['douban.com']
    start_urls = ['https://movie.douban.com/top250']

    def start_requests(self):
        """Fill the shared proxy IP pool once, then emit the initial requests.

        Overridden so the pool is populated before the first request is sent.
        BUG FIX: the original override yielded nothing, so Scrapy received no
        start requests and the crawl never began.
        """
        # Fetch a batch of proxy IPs from the vendor API (one "ip:port" per line).
        ips = requests.get(
            'http://http.tiqu.letecs.com/getip3?num=30&type=1&pro=&city=0&yys=0&port=1&pack=160758&ts=0&ys=0&cs=0&lb=1&sb=0&pb=45&mr=2&regions=&gm=4',
            timeout=10,  # don't hang the whole crawl on a slow proxy API
        )
        for ip in ips.text.split('\r\n'):
            ip = ip.strip()
            if ip:  # skip blank lines the API may append
                ipPool.append('http://' + ip)
        # Delegate to the base class, which yields a Request for every URL
        # in start_urls — this is what actually starts the crawl.
        yield from super().start_requests()

    def parse(self, response):
        """Parse one Top-250 listing page, then follow the "next" link.

        Yields:
            DoubanItem: one per movie, with ``name`` and ``score`` fields.
            scrapy.Request: for the next listing page, when one exists.
        """
        # Log the User-Agent actually sent, to confirm UA rotation middleware works.
        print(response.request.headers['User-Agent'])
        el_list = response.xpath('//div[@class="info"]')
        print(len(el_list))
        for el in el_list:
            # BUG FIX: build a fresh item per movie instead of mutating one
            # shared instance, and yield it so pipelines receive the data
            # (the original had `yield item` commented out).
            item = DoubanItem()
            item["name"] = el.xpath('./div[1]/a/span[1]/text()').extract_first()
            item["score"] = el.xpath('./div[2]/div/span[2]/text()').extract_first()
            yield item
        # Pagination: the "next" link disappears on the last page.
        url = response.xpath('//span[@class="next"]/a/@href').extract_first()
        if url is not None:
            # NOTE(review): removed the blocking time.sleep(2) — it stalls
            # Scrapy's event loop; configure DOWNLOAD_DELAY in settings instead.
            yield scrapy.Request(
                url=response.urljoin(url),
                callback=self.parse
            )