# -*- coding: utf-8 -*-
from scrapy.spiders import CrawlSpider
from scrapy.http import Request
from Smoking.quotesbot.items import SmokingSpiderItem
import json


class Smokingspider(CrawlSpider):
    """Spider that crawls Baidu image search for a keyword and yields
    one :class:`SmokingSpiderItem` per picture found.

    Flow: ``parse`` fans out paginated requests against Baidu's JSON
    image-search API; ``get_pic`` decodes each JSON page and emits items.
    """

    name = 'Smokingspider'

    # Browser-like headers so Baidu does not reject the requests.
    # (Previously defined but never used; now attached to every Request.)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.54 Safari/536.5'
    }
    # allowed_domains must be bare domain names, not URLs — a URL entry
    # makes Scrapy's offsite middleware drop every request.
    allowed_domains = ["image.baidu.com"]
    start_urls = ["http://image.baidu.com/"]

    def parse(self, response):
        """Generate paginated requests against Baidu's image-search JSON API.

        :param response: the start-page response (only used as the entry
            trigger; its content is not read).
        :yields: one ``Request`` per result page, carrying the search
            keyword in ``meta`` and dispatched to :meth:`get_pic`.
        """
        search_word = "烟民"
        # The API is asked for rn=60 results per page, so advance the pn
        # offset by 60.  (The original stepped by 30, fetching every page
        # with 50% overlap and yielding duplicate items.)
        for pn in range(0, 3000, 60):
            baidu_url = (
                "https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj"
                "&ct=201326592&is=&fp=result&cl=2&lm=-1&ie=utf-8&oe=utf-8"
                "&adpicid=&st=-1&z=&ic=0&word={0}&s=&se=&tab=&width=&height="
                "&face=0&istype=2&qc=&nc=1&fr=&pn={1}&rn=60&gsm=3c"
                "&1507915209449=".format(search_word, pn)
            )
            yield Request(
                baidu_url,
                headers=self.headers,  # actually use the browser disguise
                meta={"search_word": search_word},
                callback=self.get_pic,
                dont_filter=True,  # API URLs differ only in pn; skip dedup filter
            )

    def get_pic(self, response):
        """Parse one JSON result page and yield an item per picture.

        :param response: JSON response from the acjson endpoint; the
            picture records live under the ``data`` key.
        :yields: ``SmokingSpiderItem`` with ``search_word``, ``pic_url``
            (list form, as the image pipeline expects) and ``pic_name``.
        """
        response_dict = json.loads(response.text)
        # The API pads the 'data' list with empty dicts; skip those and
        # any entry missing the thumbnail URL instead of raising KeyError.
        for pic in response_dict.get('data', []):
            if not pic or 'middleURL' not in pic:
                continue
            # A fresh item per picture: reusing one mutated instance (as
            # the original did) corrupts items in async pipelines.
            item = SmokingSpiderItem()
            item['search_word'] = response.meta['search_word']
            item['pic_url'] = [pic['middleURL']]  # list form required by ImagesPipeline
            item['pic_name'] = pic.get('fromPageTitleEnc', '')
            yield item

