import scrapy
import json
# scrapy crawl jingdong -o jingdong.csv

from demo2.items import jingdongItem

class JingdongSpider(scrapy.Spider):
    """Spider for JD.com's book best-seller ranking API.

    The endpoint returns JSONP (``func({...})``), so the payload must be
    unwrapped before JSON parsing. Yields one ``jingdongItem`` per book.
    """

    name = "jingdong"
    allowed_domains = ["jd.com"]
    start_urls = ["https://gw-e.jd.com/client.action?body=%7B%22moduleType%22%3A1%2C%22page%22%3A1%2C%22pageSize%22%3A20%2C%22scopeType%22%3A1%2C%22month%22%3A%22%22%2C%22categoryFirst%22%3A%22%22%2C%22categorySecond%22%3A%22%22%2C%22categoryThree%22%3A%22%22%7D&functionId=bookRank&client=e.jd.com&timeout=30000&output=jsonp&callback=func"]

    def parse(self, response):
        """Unwrap the JSONP response and yield one item per book.

        :param response: scrapy Response whose body looks like ``func({...})``.
        :yields: ``jingdongItem`` populated from each entry in
            ``data.books`` of the decoded payload.
        """
        # Strip the JSONP wrapper by locating the outermost parentheses
        # instead of hard-coding the callback name's length (the old
        # ``body[5:-1]`` slice broke if ``callback=`` ever changed).
        text = response.text
        start = text.find("(") + 1
        end = text.rfind(")")
        json_data = json.loads(text[start:end])

        # Error payloads may lack 'data'/'books'; fall back to an empty list
        # rather than raising KeyError and aborting the crawl.
        books = (json_data.get('data') or {}).get('books') or []
        for book in books:
            item = jingdongItem()
            item['sequence'] = book.get('sequence')
            item['discount'] = book.get('discount')
            item['publisher'] = book.get('publisher')
            item['sellPrice'] = book.get('sellPrice')
            item['definePrice'] = book.get('definePrice')
            item['bookName'] = book.get('bookName')
            # 'authors' can be missing or an empty list; the old
            # ``authors[0].get('name')`` raised TypeError/IndexError then.
            authors = book.get('authors') or []
            item['authors'] = authors[0].get('name') if authors else None
            item['bookId'] = book.get('bookId')
            yield item
