# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# Standard-library imports used by the SQLite-backed pipeline below
import sqlite3
import time
from datetime import datetime

from email_spider.set.setting import Set


class EmailSpiderPipeline:
    """Scrapy item pipeline that persists scraped company records to SQLite.

    Rows go into the table named by the project settings; items that fail to
    insert are appended to a per-day error-log file inside the configured
    data-source folder.
    """

    def __init__(self):
        self.con = sqlite3.connect('company.db')
        self.cur = self.con.cursor()
        # Date stamp used to name the daily error-log file.
        self.data = time.strftime('%Y-%m-%d')
        self.file_name = Set().file_name  # folder of the data source to crawl
        self.table = Set().db_table  # table that receives the scraped rows

        # SQLite cannot bind identifiers (table names) via ? / :name
        # placeholders, so the name is interpolated directly; it comes from
        # trusted project config, not user input.  The original statement
        # ("create table table=:value1") was invalid SQL and failed on every
        # start-up.  IF NOT EXISTS makes repeated runs idempotent.
        try:
            self.cur.execute(
                'create table if not exists {} '
                '(id integer primary key autoincrement, '
                'company, tianyancha_url, email, tel)'.format(self.table))
            self.con.commit()
        except Exception as e:
            print(e)

    def process_item(self, item, spider):
        """Insert one scraped item; log the error to a file on failure.

        Always returns the item unchanged so downstream pipelines still
        receive it, even when the insert fails.
        """
        try:
            self.cur.execute(
                'insert into {} (company, tianyancha_url, email, tel) '
                'values (?, ?, ?, ?)'.format(self.table),
                (item['company'], item['tianyancha_url'],
                 item['email'], item['tel']))
            self.con.commit()
        except Exception as e:
            print('data save error. ', e)
            # Best-effort error log; 'with' guarantees the handle is closed
            # even if the write itself raises.
            with open(self.file_name + '/data_save_error_' + self.data + '.txt',
                      'a', encoding='utf-8') as f:
                f.write('\n {}----{}\n'.format(datetime.now(), e))
        return item

    def close_spider(self, spider=None):
        """Release the cursor and connection when the spider finishes.

        Scrapy invokes this hook as close_spider(spider); the original
        signature took no spider argument and raised TypeError at shutdown.
        The default of None keeps any existing no-argument callers working.
        """
        self.cur.close()
        self.con.close()
