# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import json


class FirstDemoPipeline:
    """Scrapy item pipeline that appends each item to duanzi.json, one JSON
    object per line (comma-terminated).

    NOTE(review): this class is shadowed by the second ``FirstDemoPipeline``
    defined later in this file — only the later definition is picked up by
    Scrapy's ITEM_PIPELINES setting.
    """

    def __init__(self):
        # File is opened as soon as the pipeline is instantiated and kept
        # open for the whole crawl.
        self.file = open('duanzi.json', 'w', encoding="utf8")

    def process_item(self, item, spider):
        """Serialize *item* to a JSON line and pass the dict on downstream."""
        item = dict(item)
        str_data = json.dumps(item, ensure_ascii=False) + ',\n'
        self.file.write(str_data)
        return item

    def close_spider(self, spider):
        # Bug fix: the original closed the file in __del__, which CPython
        # does not guarantee to run (it may fire late or never, e.g. during
        # interpreter teardown). close_spider is Scrapy's documented
        # shutdown hook and runs exactly once when the spider finishes.
        self.file.close()
import pymysql



class FirstDemoPipeline(object):
    """Write crawled items into duanzi.json, one JSON object per line."""

    def open_spider(self, spider):
        # Opened once when the spider starts; stays open for the whole crawl.
        self.file = open('duanzi.json', 'w', encoding="utf8")

    def process_item(self, item, spider):
        # Convert the (possibly Item-typed) object to a plain dict, append it
        # to the file as a comma-terminated JSON line, and pass it downstream.
        record = dict(item)
        self.file.write(json.dumps(record, ensure_ascii=False) + ',\n')
        return record

    def close_spider(self, spider):
        # Closed once when the spider finishes.
        self.file.close()


# Write items into the MySQL database.
class DBPipiline:
    """Scrapy item pipeline that inserts each item into the duanzi_spider
    table of the local ``duanzi`` MySQL database.

    NOTE(review): credentials and connection parameters are hard-coded —
    fine for a demo, but move them to settings for real use.
    """

    def open_spider(self, spider):
        # One connection per crawl, opened when the spider starts.
        self.con = pymysql.connect(host='localhost',
                                   user='root', password='111111',
                                   port=3306,
                                   db='duanzi',
                                   charset='utf8'
                                   )
        self.cursor = self.con.cursor()

    def process_item(self, item, spider):
        # content can be very large — drop it from the INSERT if needed.
        # Bug fix: the original %-interpolated scraped values straight into
        # the SQL string, which breaks on any quote character in the data
        # and is SQL-injectable. Let the driver bind the values instead.
        sql = ('INSERT INTO duanzi_spider(title,author,link,views,type,content) '
               'VALUES (%s,%s,%s,%s,%s,%s)')
        self.cursor.execute(sql, (item['title'], item['author'], item['link'],
                                  item['views'], item['type'], item['content']))
        self.con.commit()
        # Bug fix: return the item so downstream pipelines receive it
        # instead of None (Scrapy's pipeline contract).
        return item

    def close_spider(self, spider):
        # Release DB resources exactly once at spider shutdown.
        self.cursor.close()
        self.con.close()
