# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from facesoCrawler import settings

class FacesocrawlerPipeline(object):
    """Default project pipeline: a no-op that forwards each item unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform or persist here; hand the item to the
        # next pipeline stage as-is.
        return item


class WeiboPipeline(object):
    """Item pipeline that de-duplicates scraped Weibo items by title and
    inserts them into the MySQL table ``fs_message_trend``.

    Duplicate detection is in-memory only (a set of titles seen during
    this crawl run); it does not consult rows already in the database.
    """

    def __init__(self):
        # Connect to the database.
        # NOTE(review): credentials are hard-coded here — consider moving
        # host/user/password into the already-imported `settings` module.
        self.conn = pymysql.connect(host="192.168.1.134", user="sa", password="12345678", db="faceso", charset='utf8')
        self.cursor = self.conn.cursor()
        #self.cursor.execute("truncate table sk_web_info;")
        #self.conn.commit()
        # Titles already stored during this run, used to skip duplicates.
        self.titles = set()

    def process_item(self, item, spider):
        # Scraped fields arrive as lists of string fragments; flatten each
        # one. The title is joined with no separator, every other field
        # with single spaces.
        for key in item.keys():
            if key == 'title':
                item[key] = ''.join(item[key])
            else:
                item[key] = ' '.join(item[key])

        if item['title'] not in self.titles:
            # Strip the spaces the join above introduced from URL-like
            # fields (URLs must not contain spaces).
            db_url = item['pic_url'].replace(' ', '')
            db_video = item['video_url'].replace(' ', '')
            db_content = item['content'].replace(' ', '')
            db_link_url = item['link_url'].replace(' ', '')

            # BUG FIX: the original passed 5 parameters for 6 placeholders,
            # so pymysql raised on every insert. The author's value order is
            # preserved; an empty string fills the missing 6th (key_words)
            # slot, which this spider does not scrape.
            # TODO(review): the value-to-column alignment looks off
            # (pic_url -> content, content -> publish_time, ...) — verify
            # against the fs_message_trend schema before trusting the data.
            self.cursor.execute(
                "insert into fs_message_trend (title, content, message_src, http_src, publish_time, key_words) values (%s,%s,%s,%s,%s,%s)",
                (item['title'], db_url, db_video, db_content, db_link_url, ''))
        self.titles.add(item['title'])
        self.conn.commit()
        return item

    def close_spider(self, spider):
        """Release the DB cursor and connection when the spider finishes.

        BUG FIX: the original method was named ``spider_closed``, which
        Scrapy never invokes on a pipeline unless explicitly connected to
        the ``spider_closed`` signal — so the connection leaked. Scrapy
        calls ``close_spider`` on pipelines automatically.
        """
        self.cursor.close()
        self.conn.close()

    # Backward-compatible alias for any caller using the old name.
    spider_closed = close_spider


if __name__ == "__main__":
    # Manual smoke test: constructing the pipeline opens a live DB
    # connection (see WeiboPipeline.__init__).
    print('This is main of module "WeiboPipeline.py"')
    tt = WeiboPipeline()
    # BUG FIX: the original leaked the connection; release it explicitly.
    tt.spider_closed(None)