# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
import sqlite3

import scrapy
import sqlalchemy
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline


from sqlalchemy import create_engine, Column, String, Float, Integer
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from colorama import Fore

from ShopifyMonitor.settings import BASE_DIR


class ShopifymonitorPipeline(object):
    """Default no-op pipeline: hands every item through unchanged."""

    def process_item(self, item, spider):
        """Return *item* as-is so downstream pipelines can process it."""
        return item


class SqlitePipeline(object):
    """Persist product items to a SQLite database through the SQLAlchemy ORM.

    Behaviour per item:
      1) If the product's detail-page link already exists in the database,
         compare stock levels:
         - current stock > stored stock: announce a restock (name + amount)
           and update ``sizes``/``stocks``.
         - current stock <= stored stock: silently update ``sizes``/``stocks``.
      2) If the link is unknown, the product is new: insert it and announce
         the new arrival.
    """

    # Declarative base class shared by the ORM models below.
    Base = declarative_base()

    class Product(Base):
        """ORM model for the ``products`` table."""
        __tablename__ = "products"
        title = Column(String(100))
        image = Column(String(200))
        # Each product is scraped only once: the detail-page URL is the
        # unique primary key.
        link = Column(String(200), unique=True, primary_key=True)
        price = Column(Float)
        sizes = Column(String(200))
        stocks = Column(Integer)

        def __repr__(self):
            """Human-friendly representation for debugging/logging."""
            return "<Product>{}:{}".format(self.title, self.stocks)

    def open_spider(self, spider):
        """Create/connect the database and open an ORM session.

        ``sqlite:///`` is followed by a relative path; ``sqlite:////`` would
        be an absolute path.
        """
        engine = create_engine("sqlite:///" + 'shopify.db')
        Session = sessionmaker(bind=engine)
        # Create the tables (no-op if they already exist).
        self.Base.metadata.create_all(engine)
        # Session acts as the unit-of-work cache for this spider run.
        self.session = Session()

    def process_item(self, item, spider):
        """Insert or update one product row; announce restocks/new arrivals.

        Returns the item unchanged so later pipelines still receive it.
        """
        # Look up the product by its unique detail-page link.
        product = self.session.query(self.Product).filter(
            self.Product.link == item['link']).first()
        if product:
            old_stock = product.stocks
            now_stock = item['stocks']
            # Stock increased -> announce the restock amount
            # (email/WeChat/DingTalk/Discord hook could go here).
            if now_stock > old_stock:
                print(Fore.YELLOW + '[补货]: 商品%s补货数量为%s' % (product.title, now_stock - old_stock))
            # Either way, refresh sizes and stock. ``product`` is already
            # tracked by the session, so no add() is needed before commit.
            product.sizes = item['sizes']
            product.stocks = now_stock
            self.session.commit()
        else:
            # Unknown link -> brand-new product: insert and announce it.
            product = self.Product(title=item['title'],
                                   image=item['image'],
                                   link=item['link'],
                                   price=item['price'],
                                   sizes=item['sizes'],
                                   stocks=item['stocks'])
            self.session.add(product)
            try:
                self.session.commit()
            except IntegrityError:
                # The unique link was inserted concurrently between the query
                # above and this commit; discard the duplicate insert.
                self.session.rollback()
            else:
                print(Fore.GREEN + "新产品[%s]上架, 目前库存为[%s]" % (item['title'], item['stocks']))
        return item

    def close_spider(self, spider):
        """Close the ORM session when the spider finishes."""
        self.session.close()


class MyImagesPipeline(ImagesPipeline):
    """Custom image-storage pipeline.

    Each item carries exactly one image URL in ``item['image']``; this
    pipeline schedules its download and passes the item through unchanged.
    """

    def get_media_requests(self, item, info):
        """Yield a download request for the item's single image URL.

        ``item['image']`` is a URL string, e.g. ``http://www.xxxx.com/xxx.png``.
        """
        yield scrapy.Request(item['image'])

    def item_completed(self, results, item, info):
        """Return the item unchanged once the download attempt completes.

        NOTE(review): items whose image failed to download are NOT dropped
        here — add a DropItem check on ``results`` if that is required.
        """
        return item
