# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.utils.project import get_project_settings
import pymongo
import pymysql

# Store the data in a txt file
# class MultipagePipeline:
#     def process_item(self, item, spider):
#         with open('mult_info.txt','a',encoding='utf8') as f:
#             f.write(item['title']+';'+item['job_name']+'\n')

# Store the data in a MongoDB database
# class MultipagePipeline:
#     # Initialize the MongoDB connection
#     def __init__(self):
#         # Get configuration from settings.py
#         settings = get_project_settings()
#         host = settings.get("MONGODB_HOST")
#         port = settings.get("MONGODB_PORT")
#         dbname = settings.get("MONGODB_DBNAME")
#         sheetname = settings.get("MONGODB_SHEETNAME")
#         # Create the MongoDB connection
#         client = pymongo.MongoClient(host=host, port=port)
#         mydb = client[dbname]
#         self.post = mydb[sheetname]
#     # Save to the database
#     def process_item(self, item, spider):
#         data = dict(item)
#         self.post.insert(data)


# Store the data in MySQL
class MultipagePipeline:
    """Scrapy item pipeline that inserts each scraped item into MySQL.

    Expects a ``DB_MYSQL`` dict in settings.py containing the keyword
    arguments for ``pymysql.connect()`` (host, user, password, database, ...).

    Fixes over the previous version:
      * ``process_item`` now returns the item, as the Scrapy pipeline
        contract requires — otherwise every later pipeline (and feed
        export) receives ``None`` and the item is silently dropped.
      * One connection is opened per crawl (``open_spider``) instead of
        one per item, and closed in ``close_spider``.
      * The cursor is managed with ``with`` so it is closed even when
        ``execute()`` raises.
    """

    def open_spider(self, spider):
        # Open a single shared MySQL connection for the whole crawl.
        db_config = spider.settings.get('DB_MYSQL')
        self.con = pymysql.connect(**db_config)

    def close_spider(self, spider):
        # Release the shared connection when the crawl finishes.
        self.con.close()

    def process_item(self, item, spider):
        """Insert one item into ``scrapy_jobs`` and pass the item on."""
        sql = 'insert into scrapy_jobs (title,jobname) values (%s,%s)'
        params = (item['title'], item['job_name'])
        # pymysql cursors are context managers: closed on success or error.
        with self.con.cursor() as cur:
            try:
                cur.execute(sql, params)
            except Exception as e:
                # Log through the spider's logger instead of print();
                # roll back so the connection stays usable for later items.
                spider.logger.error("insert err: %s", e)
                self.con.rollback()
            else:
                self.con.commit()
        # Required by the Scrapy pipeline API so downstream pipelines run.
        return item
