# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import time
import pymysql
from pymysql import cursors
from twisted.enterprise import adbapi
from scrapy.exporters import JsonItemExporter
from scrapy.exporters import JsonLinesItemExporter


class ZhipinPipeline(object):
    """
        Hand-rolled JSON-array writer: wraps the scraped items in '[' ... ']'
        and writes one JSON object per line.

        NOTE(review): every object is followed by a trailing comma, so the
        final file is "[{...},\n]" -- not strictly valid JSON. Kept as-is to
        preserve the existing output format.
    """
    def __init__(self):
        # Append mode: re-running the spider appends another '[' to an
        # existing file -- existing behaviour, left unchanged.
        self.f = open('user-agent-changgui.json', 'a', encoding='utf-8')
        self.f.write('[' + '\n')
        print('打开文件。。。')

    def process_item(self, item, spider):
        """
            Serialise one item and append it to the file.

            BUG FIX: return the item. Without the return, Scrapy feeds None
            to every later pipeline in ITEM_PIPELINES, silently dropping the
            item for the rest of the chain.
        :param item:
        :param spider:
        :return: the unchanged item, for downstream pipelines
        """
        content = json.dumps(dict(item), ensure_ascii=False) + ',' + '\n'
        self.f.write(content)
        print('写入数据。。。')
        return item

    def close_spider(self, spider):
        """Close the JSON array and release the file handle."""
        self.f.write(']')
        self.f.close()
        print('关闭文件。。。')


class JsonExporterPipleline(object):
    """
        Pipeline that delegates serialisation to scrapy's JsonItemExporter.

        The exporter buffers/finalises the whole result set when the spider
        closes, which can use a lot of memory on big crawls, but the output
        is already a well-formed JSON document -- no post-processing needed.
    """
    def __init__(self):
        # Open the target file in binary append mode and hand it straight
        # to the exporter, which takes over all writing from here.
        out = open('user-agent.json', 'ab')
        self.file = out
        self.exporter = JsonItemExporter(out, encoding="utf-8", ensure_ascii=False)
        self.exporter.start_exporting()
        print('打开文件。。。')

    def process_item(self, item, spider):
        # Serialise via the exporter, then keep the item flowing downstream.
        self.exporter.export_item(item)
        return item

    def close_spider(self, spider):
        # Finish the JSON document first, only then release the handle.
        self.exporter.finish_exporting()
        self.file.close()
        print('关闭文件。。。')


class JsonLinesItemExporterPipleline(object):
    """
        Pipeline that incrementally appends items into a JSON array file,
        one object per line, so the whole result set never sits in memory.
    """
    @classmethod
    def from_crawler(cls, crawler):
        """
            Read the search keyword from the project settings; it is used to
            build the output file name and the spider's start URL.
            NOTE(review): KEY is stored on the CLASS, not the instance, so it
            is shared by every instance of this pipeline.
        :param crawler:
        :return:
        """
        cls.KEY = crawler.settings.get("KEY", 'Java')
        return cls()

    # Uses scrapy's JsonLinesItemExporter to write the json file
    def __init__(self):
        """
            What this __init__ does:
                1. Initialise the json file, building its name from KEY and
                   today's date.
                2. Check whether the file's last line is ']'. If so, strip it
                   and write ',\n' so new records can be appended to the
                   existing array; otherwise (new/empty file, or any error)
                   open the file and write the opening '['.
        """

        TIME_STR = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        FilePATH = 'data/' + self.KEY + '_' + TIME_STR + '.json'

        try:
            self.file = open(FilePATH, 'ab+')
            offset = -3
            while True:
                """
                file.seek(offset, whence=0)：从文件中移动offset个操作标记（文件指针），正往结束方向移动，负往开始方向移动。
                如果设定了whence参数，就以whence设定的起始位为准，0代表从头开始，1代表当前位置，2代表文件最末尾位置。 
                """
                self.file.seek(offset, 2)  # seek(offset, 2): place the file pointer `offset` bytes before EOF
                lines = self.file.readlines()  # read every line from the pointer to EOF
                if len(lines) >= 2:  # at least two lines guarantees the last one is complete
                    last_line = lines[-1].decode()  # take the last line
                    break
                # If readlines() returned a single line it may have been cut
                # mid-line by the seek, so double the backwards offset and
                # retry until more than one line is read.
                offset *= 2
            if last_line == ']':
                # Drop the closing ']' (and the '\n' before it), then write
                # ',\n' so the next exported record continues the array.
                self.file.seek(-2, 2)
                self.file.truncate()
                self.file.write((',' + '\n').encode())
        except Exception as e:
            # Any failure above lands here: missing 'data/' dir, empty file
            # where seek(-3, 2) raises OSError, etc. The file is (re)opened
            # and the opening '[' is written.
            # NOTE(review): if the first open() succeeded before the failure,
            # that handle is replaced without being closed -- TODO confirm.
            print(e)
            self.file = open(FilePATH, 'ab+')
            self.file.write(('[' + '\n').encode())

        self.exporter = JsonLinesItemExporter(self.file, encoding="utf-8", ensure_ascii=False)
        print('打开文件。。。')

    def open_spider(self, spider):
        """
            Build the start URL from KEY and hand it to the spider.
        :param spider:
        """
        spider.start_urls = [f'https://www.zhipin.com/c100010000/e_102/?query={self.KEY}&page=1']
        # spider.start_urls = [f'http://httpbin.org/user-agent']
        # print(self.KEY)

    def process_item(self, item, spider):
        """
            Write each item to the file as soon as it arrives, so memory use
            stays flat regardless of crawl size.
        :param item:
        :param spider:
        :return:
        """
        self.exporter.export_item(item)

        # The exporter ends each record with '\n'; drop that byte and write
        # ',\n' instead so the records form the body of a JSON array.
        self.file.seek(-1, 2)
        self.file.truncate()
        self.file.write((',' + '\n').encode())

        return item

    def close_spider(self, spider):
        """
            Before closing, strip the trailing ',\n' left by the last item
            and write ']' so the file becomes a complete JSON document.
        :param spider:
        """
        self.file.seek(-2, 2)
        self.file.truncate()
        self.file.write(('\n' + ']').encode())
        self.file.close()
        print('关闭文件。。。')


class MySQLPipeline(object):
    """
        Pipeline that normalises each item's skill field against a whitelist
        and inserts the item into MySQL through a twisted adbapi connection
        pool, so inserts do not block the reactor.
    """
    @classmethod
    def from_crawler(cls, crawler):
        """
            Read the MySQL connection parameters and the search keyword from
            the project settings (with local-development defaults).
        :param crawler:
        :return: a new pipeline instance
        """
        cls.HOST = crawler.settings.get("MYSQL_HOST", '127.0.0.1')
        cls.PORT = crawler.settings.get("MYSQL_PORT", 3306)
        cls.USER = crawler.settings.get("MYSQL_USER", 'root')
        cls.PASSWD = crawler.settings.get("MYSQL_PASSWORD", '123')
        cls.MYSQL_DB_NAME = crawler.settings.get("MYSQL_DB_NAME", 'zhaopin')
        cls.MYSQL_TABLE_NAME = crawler.settings.get("MYSQL_TABLE_NAME", 'job')
        cls.KEY = crawler.settings.get("KEY", 'Java')
        return cls()

    def __init__(self):
        # adbapi runs each interaction on a thread pool, unlike a plain
        # blocking pymysql connection.
        self.dbpool = adbapi.ConnectionPool('pymysql', host=self.HOST, db=self.MYSQL_DB_NAME, user=self.USER,
                                            passwd=self.PASSWD, port=self.PORT, charset='utf8',
                                            )

    def open_spider(self, spider):
        """
            Build the start URL from KEY and hand it to the spider.
        :param spider:
        """
        spider.start_urls = [f'https://www.zhipin.com/c100010000/e_102/?query={self.KEY}&page=1']

    def process_item(self, item, spider):
        """
            Normalise the item's skill field, then queue an asynchronous
            insert on the connection pool.
        :param item:
        :param spider:
        :return: the (modified) item, for later pipelines
        """
        item = self.skill_filter(item)
        self.dbpool.runInteraction(self.insert_db, item)
        return item

    def close_spider(self, spider):
        """
            Shut down the connection pool when the spider finishes.
        :param spider:
        """
        self.dbpool.close()

    def readtxt(self):
        """
            Load the skill whitelist from skills.txt, one skill per line.
        :return: list of skill names
        """
        # `with` guarantees the handle is closed even if reading fails
        # (the original left the file open on error).
        with open("skills.txt", 'r', encoding='utf-8') as file:
            return [line.strip('\n') for line in file]

    def fuc_filter(self, skill_list):
        """
            Keep only the skills present in the whitelist. When the scraped
            list has at most one entry, fall back to adding the search KEY.
        :param skill_list: list of raw skill strings
        :return: comma-separated whitelisted skills
        """
        date = skill_list
        if len(date) > 1:
            print('不为空')
        else:
            # BUG FIX: lists have no .add() (that is a set method). The
            # original raised AttributeError here, swallowed it in a broad
            # except, and so never added KEY. Use .append() instead; the
            # try/except is no longer needed.
            date.append(self.KEY)
        print(date)
        # Membership tests against a set are O(1) instead of O(n) per skill.
        skills = set(self.readtxt())
        skill_str = ','.join(x for x in date if x in skills)
        return skill_str

    def skill_filter(self, item):
        """
            Replace item['skill'] (a comma-separated string) with its
            whitelist-filtered version.
        :param item:
        :return: the modified item
        """
        skill_list = item['skill'].split(',')
        item['skill'] = self.fuc_filter(skill_list)
        return item

    def insert_db(self, tx, item):
        """
            Insert one job row; the value order matches the column list in
            the SQL one-for-one.
        :param tx: transaction/cursor supplied by runInteraction
        :param item:
        """
        values = (
            item['job_name'],
            item['company_name'],
            item['city'],
            item['url'],
            item['salary'],
            item['min_salary'],
            item['max_salary'],
            item['avg_salary'],
            item['education'],
            item['experience'],
            item['address'],
            item['sel_stage'],
            item['industry'],
            item['sel_scale'],
            item['skill'],
            item['welfare'],
            item['description'],
            item['company_info'],
        )
        # NOTE(review): the table name is interpolated from trusted project
        # settings (identifiers cannot be parameterised); all VALUES are
        # parameterised, which prevents SQL injection from scraped data.
        sql = ('INSERT INTO ' + self.MYSQL_TABLE_NAME +
               '(job_name,company_name,city,url,salary,min_salary,max_salary,avg_salary,education,'
               'experience,address,sel_stage,industry,sel_scale,skill,welfare,description,company_info) VALUES '
               '(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) ')
        tx.execute(sql, values)
