# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
from collections import OrderedDict
from pprint import pprint

from pymongo import MongoClient
from openpyxl import Workbook

# MongoDB connection and the target collection for scraped book records.
client = MongoClient(host="127.0.0.1", port=27017)
collection = client["suning"]["book_info"]

# Workbook that accumulates the scraped rows; saved by the pipeline.
wb = Workbook()

# Use the default sheet and give it a meaningful title.
ws = wb.active
ws.title = "book_info"

# Header row, columns A..H. Written once at import time; the order must
# match the row order appended by BookPipeline.process_item.
_HEADER_TITLES = (
    "大分类标题",    # A: top-level category title
    "中间分类标题",  # B: mid-level category title
    "小分类标题",    # C: sub-category title
    "小分类链接",    # D: sub-category URL
    "图书标题",      # E: book title
    "书店名字",      # F: shop name
    "图书链接",      # G: book URL
    "图书价格",      # H: book price
)
for _column, _title in zip("ABCDEFGH", _HEADER_TITLES):
    ws[_column + "1"] = _title

class BookPipeline(object):
    """Scrapy item pipeline that appends each scraped book to the
    module-level openpyxl worksheet and writes the workbook to disk
    once, when the spider closes.
    """

    # Item keys in worksheet column order (must match the A..H header row
    # written at module import time).
    _FIELDS = (
        "type1_book", "type2_book", "type3_book", "type3_bookurl",
        "book_name", "shopname", "book_url", "book_price",
    )

    def process_item(self, item, spider):
        """Append one book record as a worksheet row.

        :param item: dict-like item carrying the keys in ``_FIELDS``
        :param spider: the spider that produced the item (unused)
        :returns: the item unchanged, so any later pipelines still run
        """
        row = [item[field] for field in self._FIELDS]
        row[4] = row[4].strip()  # book_name tends to carry stray whitespace
        ws.append(row)
        # NOTE: the workbook is intentionally NOT saved here. Saving per
        # item rewrote the entire .xlsx file for every record — O(n^2)
        # total I/O over a crawl. See close_spider below.
        return item

    def close_spider(self, spider):
        """Persist the workbook exactly once when the spider finishes.

        Scrapy calls this hook automatically at spider shutdown.
        """
        wb.save("book_infos.xlsx")
