# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import json
import codecs
import MySQLdb
import MySQLdb.cursors
from xlutils.copy import copy
from xlrd import open_workbook
from openpyxl import Workbook
import os
from scrapy import log

from twisted.enterprise import adbapi
from scrapy import signals

# Maps each item field name to its Chinese column header used when the
# XLSX pipeline writes the spreadsheet's first (header) row.
ATTRS = {
    'title':u'职位名称',
    'link':u'详情链接',
    'company':u'公司名称',
    'updatetime':u'更新日期',
    'publish_date':u'发布日期',
    'location':u'地点',
    'work_place':u'工作地点',
    'wages':u'工资',
    'keyword':u'搜索关键字',
    'work_experience':u'工作经验',
    'education':u'学历',
    'recruitment_number':u'招聘要求',
    'specialty':u'专业',
    'english':u'英语水平'
}

# Fixed column order for spreadsheet rows; every key must exist in ATTRS.
# NOTE(review): 'publish_date' is in ATTRS but intentionally absent here —
# confirm it should not be exported.
KEYS = [
    'keyword',
    'location',
    'title',
    'link',
    'company',
    'work_place',
    'wages',
    'updatetime',
    'work_experience',
    'education',
    'recruitment_number',
    'specialty',
    'english'
]




class JobsysJsonPipeline(object):
    """Write each scraped item as one JSON object per line (JSON Lines)."""

    def __init__(self):
        # NOTE(review): Windows-style relative path; assumes the process is
        # started from a sibling directory — confirm against deployment.
        self.file = codecs.open('..\\jobsys\\jobsys.json', 'w', encoding = 'utf-8')

    def process_item(self, item, spider):
        """Serialize the item to one JSON line and pass it on unchanged."""
        # ensure_ascii=False keeps the Chinese field values human-readable.
        line = json.dumps(dict(item), ensure_ascii = False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Scrapy invokes ``close_spider`` on pipelines automatically; the
        # old ``spider_closed`` name was never called, so the file handle
        # leaked. Delegate to keep the old name working for any callers.
        self.spider_closed(spider)

    def spider_closed(self, spider):
        self.file.close()

class JobsysXLSXPipeline(object):
    """Append every scraped item as one spreadsheet row.

    The header row (Chinese titles from ATTRS, ordered by KEYS) is written
    once at startup; each item then becomes the next row of sheet 0.
    """

    def __init__(self):
        self.filename = "..\\jobsys\\jobsys.xls"
        # NOTE(review): openpyxl emits xlsx-format data even though the
        # filename says .xls, and xlrd is then used to re-open it — confirm
        # the installed xlrd version accepts this file.
        wb = Workbook()
        sheet1 = wb.active
        sheet1.append([ATTRS[key] for key in KEYS])
        wb.save(self.filename)

    def process_item(self, item, spider):
        """Append ``item`` as a new row after the last existing row.

        NOTE(review): the whole workbook is re-opened, copied and re-saved
        per item, which is O(n^2) over the run — acceptable for small crawls.
        """
        job_data = open_workbook(filename = self.filename)
        new_job_data = copy(job_data)
        sheet1 = new_job_data.get_sheet(0)
        item = dict(item)
        # .get() with an empty default: a missing field now produces an
        # empty cell instead of raising KeyError and dropping the item.
        row_data = [(item.get(key) or u'').strip() for key in KEYS]
        # nrows of the existing sheet == index of the first free row.
        rows = job_data.sheet_by_index(0).nrows
        for col, value in enumerate(row_data):
            # Decode only byte strings; unconditionally decoding an already-
            # unicode value triggers an implicit ASCII encode and crashes on
            # non-ASCII (Chinese) content.
            if isinstance(value, bytes):
                value = value.decode('utf-8')
            sheet1.write(rows, col, value)
        new_job_data.save(self.filename)
        new_job_data = None
        return item

    def spider_closed(self, spider):
        # Nothing to release: every write is flushed inside process_item.
        pass

class JobsysMySQLPipeline(object):
    """Asynchronously insert scraped jobs into the Jobsys MySQL database."""

    def __init__(self):
        # NOTE(review): credentials are hard-coded; they belong in
        # settings.py / environment configuration.
        self.connpool = adbapi.ConnectionPool('MySQLdb',
            host = '127.0.0.1',
            db = 'Jobsys',
            user = 'root',
            passwd = 'zhiyou100',
            cursorclass = MySQLdb.cursors.DictCursor,
            charset = 'utf8',
            use_unicode = True
            )

    def process_item(self, item, spider):
        """Schedule the insert on a pool thread and return the item at once."""
        query = self.connpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self.handle_error)
        return item

    def _conditional_insert(self, tx, item):
        """Insert the item if it has a title (i.e. the parse succeeded).

        Uses a parameterized query, so scraped content cannot inject SQL.
        Missing optional fields insert NULL instead of raising KeyError and
        silently losing the whole row.
        """
        if item.get('title'):
            # NOTE(review): the publish_date column is fed from the
            # 'updatetime' field, not 'publish_date' — confirm intended.
            tx.execute("insert into job_detail (title, link, company, publish_date) values(%s, %s, %s, %s)",
                (item.get('title'), item.get('link'), item.get('company'), item.get('updatetime')))

    def close_spider(self, spider):
        # Release pooled DB connections when the spider finishes; the pool
        # was previously never closed.
        self.connpool.close()

    def handle_error(self, e):
        # Log the Twisted Failure from a failed interaction.
        log.err(e)

