# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import codecs
import csv

from itemadapter import ItemAdapter
import openpyxl
from scrapy.exporters import CsvItemExporter


class WorkDataPipeline:
    """Collect scraped salary/job items into an xlsx workbook ('工资与工作')."""

    def __init__(self):
        # Workbook and worksheet are created lazily in open_spider.
        self.wb = None
        self.ws = None
        self.name = '工资与工作'
        # Header: indicator, city, years 2021 down to 2012, then remark.
        year_columns = tuple(str(year) for year in range(2021, 2011, -1))
        self.head = ('指标', '城市') + year_columns + ('备注',)

    def open_spider(self, spider):
        """Create the workbook, rename the default sheet and write the header row."""
        self.wb = openpyxl.Workbook()
        self.ws = self.wb.active  # default worksheet
        self.ws.title = '工资与工作'
        self.ws.append(self.head)

    def process_item(self, item, spider):
        """Append one item as a worksheet row (same column order as the header)."""
        row = [item['industry'], item['city']]
        row.extend(item['list_%d' % year] for year in range(2021, 2011, -1))
        row.append(item['exp'])
        self.ws.append(tuple(row))
        return item

    def close_spider(self, spider):
        """Persist the workbook to disk when the spider finishes."""
        self.wb.save('工资与工作.xlsx')


class CsvPrintPipeline(object):
    """Buffer scraped items and flush them to work_data.csv in batches of 100.

    Fixes relative to the original version:
    - DictWriter.writerows was fed plain lists, which raises AttributeError
      (DictWriter rows must be dicts keyed by the fieldnames) — rows are now
      built as dicts.
    - The 'exp' column was declared in the header but never written — it is
      now included in every row.
    - close_spider called self.cout.close(), but csv.DictWriter has no
      close(); the underlying file object is closed instead.
    - The output file is opened with an explicit UTF-8 encoding since the
      data contains Chinese text.
    """

    # Column order of the CSV output; keys of each row dict must match.
    FIELDNAMES = ['industry', 'city', '2021', '2020', '2019',
                  '2018', '2017', '2016', '2015',
                  '2014', '2013', '2012', 'exp']
    BATCH_SIZE = 100  # flush buffered rows once this many accumulate

    def __init__(self):
        self.cout = None  # csv.DictWriter, created in open_spider
        self.file = None  # underlying file handle for work_data.csv
        self.data = []    # buffered row dicts awaiting a flush

    def open_spider(self, spider):
        """Open the output file and write the CSV header."""
        print("开始输出CSV文件")
        self.file = open("work_data.csv", "w", newline='', encoding='utf-8')
        self.cout = csv.DictWriter(self.file, self.FIELDNAMES)
        self.cout.writeheader()

    def process_item(self, item, spider):
        """Convert one item to a row dict, buffer it, flush on a full batch."""
        row = {
            'industry': item.get('industry'),
            'city': item.get('city'),
            '2021': item.get('list_2021'),
            '2020': item.get('list_2020'),
            '2019': item.get('list_2019'),
            '2018': item.get('list_2018'),
            '2017': item.get('list_2017'),
            '2016': item.get('list_2016'),
            '2015': item.get('list_2015'),
            '2014': item.get('list_2014'),
            '2013': item.get('list_2013'),
            '2012': item.get('list_2012'),
            'exp': item.get('exp'),
        }
        self.data.append(row)

        if len(self.data) >= self.BATCH_SIZE:
            self.cout.writerows(self.data)
            self.data = []
        return item

    def close_spider(self, spider):
        """Flush any remaining buffered rows and close the output file."""
        if self.data:
            self.cout.writerows(self.data)
        # DictWriter has no close(); release the file handle itself.
        self.file.close()
        print("结束输出CSV文件")
