# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from .items import WeiBoArticleItem, UserItem, CommentItem
import pymysql


class SpiderdemoPipeline:
    """Persist scraped Weibo items into MySQL.

    Routes each item type (article / user / comment) to its own table via a
    single shared connection.
    """

    # The connection is created once in __init__ (i.e. once per crawl) to
    # avoid opening a connection per item and exceeding MySQL's connection
    # limit, which would crash the server.
    def __init__(self):
        self.conn = pymysql.connect(
            user='root',
            password='123456',
            host='master',
            port=3306,
            database='weiboSpider'
        )

    # Dispatch table: (item class, INSERT statement, item-field order matching
    # the table's column order). Keeping this data-driven removes the three
    # duplicated try/except blocks the insert logic would otherwise need.
    # NOTE(review): the INSERTs rely on the tables' column order matching
    # these field tuples — confirm against the schema.
    _TABLES = (
        (WeiBoArticleItem,
         "insert into weiboArticle values(%s,%s,%s,%s,%s,%s,%s,%s,%s)",
         ('id', 'screen_name', 'created_at', 'source', 'text', 'mid',
          'attitudes_count', 'comments_count', 'reposts_count')),
        (UserItem,
         "insert into user values(%s,%s,%s,%s,%s)",
         ('id', 'screen_name', 'followers_count', 'follow_count', 'gender')),
        (CommentItem,
         "insert into comment values(%s,%s,%s,%s,%s)",
         ('mid', 'user_id', 'comment_id', 'text', 'created_at')),
    )

    def process_item(self, item, spider):
        """Write `item` to the table matching its type; unknown types pass through.

        Always returns the item so later pipelines still receive it.
        """
        for item_cls, sql, fields in self._TABLES:
            if isinstance(item, item_cls):
                self._insert(sql, tuple(item[field] for field in fields))
                break
        return item

    def _insert(self, sql, params):
        """Execute one INSERT; commit on success, roll back on any error.

        The cursor is always closed (the original leaked one per item).
        """
        cursor = self.conn.cursor()
        try:
            cursor.execute(sql, params)
        except Exception as e:
            # Best-effort logging of the failure, then roll back so a bad
            # row never leaves the transaction half-applied.
            print("*" * 50)
            print(e)
            print("*" * 50)
            self.conn.rollback()
        else:
            # No error: commit the row.
            self.conn.commit()
        finally:
            cursor.close()

    # Called by Scrapy when the spider stops; without this the MySQL
    # connection opened in __init__ was never released.
    def close_spider(self, spider):
        self.conn.close()

class GuaZiSpiderPipeline:
    """No-op pipeline for items built by GuaziSpider.

    Currently performs no processing; it simply hands every item on to the
    next pipeline stage unchanged.
    """

    def process_item(self, item, spider):
        """Pass `item` through untouched."""
        return item