# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql
from pymysql.converters import escape_string
import time
import meilisearch

class WebspiderPipeline:
    """Persist crawled pages into the MySQL `web` table (insert-or-update by key)."""

    def __init__(self):
        # Open the database connection once per spider run.
        # NOTE(review): credentials are hard-coded; move host/user/passwd/db into
        # Scrapy settings or environment variables before publishing this code.
        self.connect = pymysql.connect(host='rm-wz9797jfgzx5x1z02po.mysql.rds.aliyuncs.com', user='qmso', passwd='BYFEM3h0NlDZsXQR', db='qmso')
        self.cursor = self.connect.cursor()
        print("+----连接数据库成功----+")

    def process_item(self, item, spider):
        # Upsert the item into MySQL using a parameterized query: the driver
        # quotes every value itself, which fixes the old %-formatted SQL that
        # was injection-prone and left `id`/`grab_time` entirely unescaped.
        sql = (
            "INSERT INTO `web` (id,url,title,keywords,description,bodyhtml,grab_time)"
            " VALUES (%s,%s,%s,%s,%s,%s,%s)"
            " ON DUPLICATE KEY UPDATE"
            " title=%s, keywords=%s, description=%s, bodyhtml=%s, grab_time=%s"
        )
        params = (
            item['id'],
            item['url'],
            item['title'],
            item['keywords'],
            item['description'],
            item['bodyhtml'],
            item['grab_time'],
            # Same values again for the UPDATE branch of the upsert.
            item['title'],
            item['keywords'],
            item['description'],
            item['bodyhtml'],
            item['grab_time'],
        )

        try:
            # Execute the upsert against the database.
            self.cursor.execute(sql, params)
            # Commit — without it the row is never persisted.
            self.connect.commit()
            print("+----数据写入成功----+")
        except pymysql.Error as e:
            # Roll back the failed transaction so the connection stays usable
            # for subsequent items instead of lingering in an aborted state.
            self.connect.rollback()
            print(e)
            print("xxxxx 数据写入失败 xxxxx")
        return item

    def close_spider(self, spider):
        # Release the cursor and the connection when the spider finishes.
        self.cursor.close()
        self.connect.close()

class MeilisearchPipeline:
    """Mirror every crawled item into the MeiliSearch `web` index."""

    def __init__(self):
        # Connect to the MeiliSearch server and keep a handle to the `web` index.
        # NOTE(review): the API key is hard-coded; consider loading it from
        # Scrapy settings or an environment variable.
        client = meilisearch.Client('https://so.7sebook.com', 'd1729fb196429ff5bb940c6f3b6c8e19a3dfc03902861bc6ee62286fdd9fd9cf')
        self.web = client.index('web')
        print("+----连接MeiliSearch成功----+")

    def process_item(self, item, spider):
        # Copy the indexed fields into a plain dict and submit it as a
        # single-document batch; the item is passed through unchanged.
        field_names = ('id', 'url', 'title', 'keywords', 'description', 'bodyhtml', 'grab_time')
        document = {name: item[name] for name in field_names}
        self.web.add_documents([document])
        return item
