# -*- coding: utf-8 -*-
import json

import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings


class ImageItem(scrapy.Item):
    """Item describing one batch of scraped image results.

    NOTE(review): declared but never instantiated or yielded by
    GoogleSpider.parse in this file — the scraped data is currently
    only printed, not emitted as items.
    """
    # Number of image URLs gathered (presumably fed from the spider's
    # running `count` — TODO confirm intended semantics).
    count = scrapy.Field()
    # The search keyword that produced these images.
    keyword = scrapy.Field()
    # List of image source URLs.
    images = scrapy.Field()


class GoogleSpider(scrapy.Spider):
    """Scrape image thumbnail URLs from a Google image-search results page.

    Yields one ``ImageItem`` per parsed page carrying the search keyword,
    the running count of URLs seen, and the URLs scraped from that page.
    """
    name = "google"
    # allowed_domains = ["google.com.hk"]
    img_url = "https://www.google.com.hk/search?q={keyword}&tbm=isch"

    def __init__(self, keyword, num=1000, **kwargs):
        """
        :param keyword: search term to query Google Images for.
        :param num: target number of images. Coerced to ``int`` because
            Scrapy passes ``-a`` command-line spider arguments as strings.
        """
        super().__init__(**kwargs)
        self.keyword = keyword
        self.num = int(num)  # -a args arrive as str; normalize once here
        self.count = 0  # running total of image URLs scraped so far

    def start_requests(self):
        # Single entry request: the image-search results page for the keyword.
        yield scrapy.Request(
            url=self.img_url.format(keyword=self.keyword),
            callback=self.parse,
        )

    def parse(self, response):
        self.logger.info('response.url: %s', response.url)
        # NOTE(review): this XPath targets a legacy static-HTML Google
        # results layout — verify it still matches the served markup.
        image_urls = response.xpath(
            '//table[@class="images_table"]/tr/td/a/img/@src').extract()
        self.count += len(image_urls)
        self.logger.info('scraped %d image urls (total %d)',
                         len(image_urls), self.count)
        # Emit the scraped data as an item instead of discarding it.
        yield ImageItem(count=self.count, keyword=self.keyword,
                        images=image_urls)


if __name__ == "__main__":
    # Standalone entry point: run the 'google' spider with project settings.
    process = CrawlerProcess(get_project_settings())
    process.crawl('google', keyword='椅子')
    process.start()  # blocks until the crawl finishes

