# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from datetime import datetime

import scrapy
from scrapy.pipelines.images import ImagesPipeline
from sqlalchemy import Column
from sqlalchemy import create_engine, Integer, String, DateTime, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

"""
CREATE TABLE `cn_news` (
  `id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT '新闻id',
  `cate_id` int(11) unsigned NOT NULL COMMENT '分类id，关联表：cn_cate',
  `title` varchar(150) DEFAULT '' COMMENT '标题',
  `describe` varchar(255) DEFAULT '' COMMENT '描述',
  `picture` varchar(150) DEFAULT '' COMMENT '图片',
  `url` varchar(255) DEFAULT '' COMMENT '链接地址',
  `content` text COMMENT '内容',
  `minute` varchar(10) DEFAULT '' COMMENT '分钟',
  `is_rec` tinyint(1) unsigned DEFAULT '2' COMMENT '是否推荐',
  `is_new` tinyint(1) unsigned DEFAULT '2' COMMENT '是否最新',
  `create_time` int(10) unsigned NOT NULL COMMENT '创建时间',
  `update_time` int(10) unsigned NOT NULL COMMENT '修改时间',
  `pv` int(11) unsigned DEFAULT '0' COMMENT '浏览量',
  `status` tinyint(1) unsigned DEFAULT '1' COMMENT '状态：[1:开启] [2:关闭] [0:删除]',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=73 DEFAULT CHARSET=utf8 COMMENT='新闻表'
"""
Base = declarative_base()


class CnNews(Base):
    """ORM model for the ``cn_news`` table (schema shown in the string above).

    Only the columns this spider writes are mapped; the remaining columns
    of the real table rely on their database-side defaults.
    """
    __tablename__ = "cn_news"

    id = Column(Integer, primary_key=True, autoincrement=True)
    cate_id = Column(Integer, default=0)        # category id (cn_cate)
    title = Column(String(150), default="")
    describe = Column(String(255), default="")  # short summary of the article
    picture = Column(String(150), default="")   # cover image path/URL
    url = Column(String(255), default="")       # source article URL (dedup key)
    # The real column is MySQL TEXT; the original String(255) would truncate
    # or reject long article bodies if create_all ever created this table.
    content = Column(Text, default="")
    # Pass the callable, NOT datetime.now(): the original evaluated it once at
    # class-definition time, stamping every inserted row with import time.
    create_time = Column(DateTime, default=datetime.now)
    update_time = Column(DateTime, default=datetime.now)


# NOTE(review): production DB credentials are hardcoded in source — move them
# to Scrapy settings or environment variables. Connecting at import time is
# also a module-level side effect; consider lazy initialization.
engine = create_engine("mysql://foxue:foxue123@219.234.4.16:3306/foxue?charset=utf8")
# One module-level Session shared by every pipeline call.
# NOTE(review): sessions are not thread-safe — confirm single-threaded use.
session = sessionmaker(bind=engine)()
# Creates cn_news only if it does not exist; existing tables are not altered.
Base.metadata.create_all(engine)


class BuddhaPipeline(object):
    """Assembles multi-page articles and persists them to ``cn_news``.

    Items arrive one page at a time; they are buffered per URL until all
    ``pagenum`` pages are present, then joined and inserted into MySQL.
    """

    def __init__(self):
        # url -> {page: item} buffer for articles split across pages.
        self.cache = dict()

    def process_item(self, item, spider):
        """Buffer one page; once the article is complete, insert the row.

        Returns the item after a successful insert, otherwise None.
        NOTE(review): Scrapy convention is to raise DropItem instead of
        returning None — downstream pipelines currently receive None for
        incomplete/duplicate articles; confirm that is intended.
        """
        pages = self.cache.setdefault(item["url"], {})
        pages[item["page"]] = item

        # Wait until every page of the article has arrived.
        if len(pages) != int(item["pagenum"]):
            return

        # All pages collected: release the buffer entry so the cache cannot
        # grow without bound over a long crawl (the original never evicted).
        del self.cache[item["url"]]

        # Join page contents in page order.
        # NOTE(review): if "page" is a string this sorts lexicographically
        # ("10" < "2") — confirm the spider emits integer page numbers.
        content = []
        for page in sorted(pages):
            content.extend(pages[page]["content"])
        item["content"] = "\n".join(content)

        # Skip articles already stored (dedup by URL).
        if session.query(CnNews).filter(CnNews.url == item["url"]).one_or_none():
            return

        news = CnNews(
            url=item["url"],
            title=item["title"],
            content=item["content"],
            describe=item["content"][:150],
            picture=item["pic"],
            create_time=item["ctime"],
            update_time=item["ctime"],
            cate_id=item["cate_id"],
        )
        session.add(news)
        session.commit()
        return item


class BuddhaImagesPipeline(ImagesPipeline):
    """Downloads the article cover image and rewrites ``item["pic"]``."""

    def get_media_requests(self, item, info):
        # One download request per item: its cover image URL.
        yield scrapy.Request(item["pic"])

    def item_completed(self, results, item, info):
        """Point ``item["pic"]`` at the stored file on success.

        On failure — or when results is empty (e.g. the request was
        filtered), which made the original raise IndexError — the remote
        URL is kept so the record still has a usable picture reference.
        """
        if results:
            ok, result = results[0]
            if ok:
                item["pic"] = "/Uploads/news/" + result["path"]
        return item
