# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import copy
import json

from twisted.enterprise import adbapi

class ScrapyRecruitPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform; hand the item to the next pipeline stage.
        return item

class JsonPipeline(object):
    """Append each scraped item to ``liepin.json`` as one JSON object per line.

    The file is opened once per pipeline instance and closed when the
    spider finishes (``close_spider``), so the handle is not leaked.
    """

    def __init__(self):
        # Append mode so repeated crawls accumulate; utf-8 keeps CJK text
        # readable in the output file.
        self.file = codecs.open('liepin.json', mode='a', encoding='utf-8')

    def process_item(self, item, spider):
        # Deep-copy so later pipeline stages cannot mutate what we serialize.
        item_c = copy.deepcopy(item)
        # ensure_ascii=False writes real UTF-8 characters instead of \uXXXX
        # escapes. This replaces the original Py2-only trick of calling
        # str.decode("unicode_escape"), which raises AttributeError on Py3.
        line = json.dumps(dict(item_c), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Scrapy calls this when the spider closes; release the file handle
        # (previously leaked — the file was never closed).
        self.file.close()

class SQLStorePipeline(object):
    """Persist scraped items into the MySQL table ``t_liepin``.

    Uses a Twisted ``adbapi`` connection pool so the blocking database
    work runs on a thread pool instead of the reactor thread.
    """

    def __init__(self):
        # NOTE(review): credentials are hard-coded here; consider moving
        # them into the Scrapy settings module.
        self.dbpool = adbapi.ConnectionPool('MySQLdb', db='scrapydb',
                                            user='root', passwd='root1234',
                                            charset='utf8', use_unicode=True)

    def process_item(self, item, spider):
        # Run the insert on the pool's worker thread; failures are routed
        # to handle_error so one bad item does not stop the crawl.
        query = self.dbpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self.handle_error)
        return item

    def _conditional_insert(self, tx, item):
        # Runs on a worker thread; `tx` is the adbapi transaction/cursor.
        # NOTE(review): despite the name, this insert is unconditional —
        # no existence check is performed before inserting.
        # Values are bound by the driver (parameterized query), which
        # protects against SQL injection from scraped text.
        tx.execute(
            "insert into t_liepin(job_url, job_url_referer, job_title, job_company, job_location, job_post_time, job_features, job_tags, job_pay, job_content, createDtTm) "
            "values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, current_timestamp)",
            (item['job_url'], item['job_url_referer'], item['job_title'],
             item['job_company'], item['job_location'], item['job_post_time'],
             item['job_features'], item['job_tags'], item['job_pay'],
             item['job_content'])
        )
        print("Item stored in db: %s" % item['job_title'])

    def handle_error(self, e):
        # Errback for runInteraction failures: log and swallow so the
        # spider keeps running.
        print("error: %s" % e)