# -*- coding: utf-8 -*-
import csv
import os
import time

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from twisted.enterprise import adbapi


class WwwJobComPipeline(object):
    """Scrapy item pipeline that persists scraped job postings.

    Each item is appended to a daily CSV file and asynchronously
    inserted into a MySQL ``jobs`` table (via a Twisted adbapi
    connection pool), skipping rows whose (position_id, platform)
    pair already exists.
    """

    @classmethod
    def from_settings(cls, settings):
        """Alternate constructor used by Scrapy.

        Builds a Twisted adbapi connection pool from the project
        settings (MYSQL_HOST / MYSQL_DBNAME / MYSQL_USER / MYSQL_PASSWD)
        and returns a pipeline instance wrapping it.
        """
        dbparams = dict(
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWD'],
            charset='utf8',
            cursorclass=pymysql.cursors.DictCursor,
            # NOTE(review): use_unicode=False makes pymysql return raw
            # bytes for string columns — looks intentional here, but
            # confirm downstream consumers expect bytes.
            use_unicode=False,
        )
        dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
        return cls(dbpool)

    def __init__(self, dbpool):
        # Twisted adbapi pool; queries run on a reactor thread pool.
        self.dbpool = dbpool

    def process_item_saveto_txt_zhaopin(self, item, spider):
        """Append selected fields of *item*, one per line, to
        ``zhaopintxt.txt`` in the working directory. Returns the item
        unchanged so the pipeline chain can continue.
        """
        file_path = os.path.join("", "zhaopintxt.txt")
        with open(file_path, 'a', encoding='utf-8') as f:
            f.write(
                "\n" + str(item["position_id"])
                + "\n" + item["city"]
                + "\n" + item["salary"]
                + "\n" + item["company_name"]
                + "\n" + item["company_size"]
                + "\n" + item["position_name"]
                + "\n" + item["work_year"]
                + "\n"
            )
        return item

    def process_item_saveto_txt_job51(self, item, spider):
        """Append every field value of *item* as one CSV row to a daily
        file named ``jobs<YYYYMMDD>.csv``.

        Fix: the file is now opened with a context manager so the
        handle is closed after each item — the original assigned
        ``self.file = open(...)`` per item and never closed it, leaking
        one file descriptor per scraped job.
        """
        file_name = 'jobs{}.csv'.format(time.strftime("%Y%m%d", time.localtime()))
        with open(file_name, 'a', newline='', encoding='utf-8') as f:
            # Row order follows the item's field order.
            csv.writer(f).writerow([value for key, value in item.items()])
        return item

    def process_item(self, item, spider):
        """Main Scrapy pipeline hook: save to CSV, then schedule an
        asynchronous conditional MySQL insert.
        """
        # Save to CSV first (synchronous).
        self.process_item_saveto_txt_job51(item, spider)
        # Save to MySQL: runInteraction executes _conditional_insert in
        # a transaction on a pool thread; errors go to _handle_error.
        query = self.dbpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self._handle_error, item, spider)
        return item

    def _conditional_insert(self, tx, item):
        """Insert *item* into ``jobs`` unless a row with the same
        (position_id, platform) pair already exists.

        ``tx`` is the adbapi transaction (cursor-like) object; for the
        SELECT, execute() returns the number of matched rows.
        """
        sql = "select * from jobs where position_id=%s and platform=%s"
        result = tx.execute(sql, (item["position_id"], item["platform"]))
        if result == 0:
            sql = "insert into jobs(position_id,position_name,position_lables,work_year,salary,city,education,company_name,industry_field,finance_stage,company_size,updated_at,`time`,platform,avg_salary) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
            params = (item["position_id"], item["position_name"], item["position_lables"], item["work_year"], item["salary"], item["city"], item["education"], item["company_name"], item["industry_field"], item["finance_stage"], item["company_size"], item["updated_at"], item["time"], item["platform"], item["avg_salary"])
            tx.execute(sql, params)
            print("add id："+str(item["position_id"]))

    def _handle_error(self, failure, item, spider):
        """Errback for the MySQL insert: log the failure and the item
        that triggered it (best-effort — the item is already in CSV).
        """
        print('_handle_error')
        print(item)
        print(failure)
