# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import csv
import os

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


from hdfs import InsecureClient
import csv


class MyspiderPipeline:
    """Stream 'bossData' items into a CSV file on HDFS.

    ``open_spider`` (re)creates the file and writes the header once;
    ``process_item`` appends one CSV row per item using the same fixed
    column order, so data rows always line up with the header.
    """

    # Single source of truth for the column order, shared by the header
    # row and every data row (the original built the header from a local
    # list but wrote rows with ``item.keys()``, which could misalign).
    COLUMNS = ['type', 'title', 'companyTitle', 'minSalary', 'maxSalary',
               'workExperience', 'educational', 'detailUrl', 'companyPeople',
               'workTag', 'welfare', 'imgSrc', 'city']

    def __init__(self):
        self.client = None  # HDFS client; created lazily in open_spider
        self.file_path = "/SparkAnalyzeRecruitmentSolution/jobData.csv"  # target HDFS path

    def open_spider(self, spider):
        """Connect to HDFS and overwrite the target file with a header row."""
        if spider.name == 'bossData':
            # WebHDFS endpoint on port 9870 (Hadoop 3.x default).
            self.client = InsecureClient('http://node1.itcast.cn:9870', user='root')
            try:
                with self.client.write(self.file_path, overwrite=True, encoding='utf-8') as writer:
                    # writeheader() replaces the dict(zip(columns, columns)) trick.
                    csv.DictWriter(writer, fieldnames=self.COLUMNS).writeheader()
            except Exception as e:
                print(f"写入表头到HDFS文件时出错: {e}")

    def process_item(self, item, spider):
        """Append one item as a CSV row and always return the item.

        Returning the item unconditionally keeps items flowing to later
        pipelines even for spiders this pipeline ignores; the original
        returned None for non-'bossData' spiders, silently dropping them.
        """
        if spider.name == 'bossData':
            try:
                with self.client.write(self.file_path, append=True, encoding='utf-8') as writer:
                    # Fixed fieldnames keep rows aligned with the header no
                    # matter how the item's keys are ordered; unexpected keys
                    # are ignored, missing ones default to ''.
                    csv.DictWriter(writer, fieldnames=self.COLUMNS,
                                   extrasaction='ignore').writerow(dict(item))
            except Exception as e:
                print(f"写入数据到HDFS文件时出错: {e}")
        return item

    def close_spider(self, spider):
        """Release the HDFS client reference when the spider closes."""
        if spider.name == 'bossData':
            self.client = None

class UrlPipeline:
    """Stream 'bossUrl' items (job-list URLs) into a CSV file on HDFS.

    ``open_spider`` (re)creates the file and writes the header once;
    ``process_item`` appends one CSV row per item using the same fixed
    column order, so data rows always line up with the header.
    """

    # Single source of truth for the column order, shared by the header
    # row and every data row.
    COLUMNS = ['type', 'url']

    def __init__(self):
        self.client = None  # HDFS client; created lazily in open_spider
        self.file_path = "/SparkAnalyzeRecruitmentSolution/jobUrl.csv"  # target HDFS path

    def open_spider(self, spider):
        """Connect to HDFS and overwrite the target file with a header row."""
        if spider.name == 'bossUrl':
            # WebHDFS endpoint on port 9870 (Hadoop 3.x default).
            self.client = InsecureClient('http://node1.itcast.cn:9870', user='root')
            try:
                with self.client.write(self.file_path, overwrite=True, encoding='utf-8') as writer:
                    # writeheader() replaces the dict(zip(columns, columns)) trick.
                    csv.DictWriter(writer, fieldnames=self.COLUMNS).writeheader()
            except Exception as e:
                print(f"在打开爬虫时写入表头到HDFS文件出错: {e}")

    def process_item(self, item, spider):
        """Append one item as a CSV row and always return the item.

        Returning the item unconditionally keeps items flowing to later
        pipelines even for spiders this pipeline ignores; the original
        returned None for non-'bossUrl' spiders, silently dropping them.
        """
        if spider.name == 'bossUrl':
            try:
                with self.client.write(self.file_path, append=True, encoding='utf-8') as writer:
                    # Fixed fieldnames keep rows aligned with the header no
                    # matter how the item's keys are ordered; unexpected keys
                    # are ignored, missing ones default to ''.
                    csv.DictWriter(writer, fieldnames=self.COLUMNS,
                                   extrasaction='ignore').writerow(dict(item))
            except Exception as e:
                print(f"写入数据到HDFS文件时出错: {e}")
        return item

    def close_spider(self, spider):
        """Release the HDFS client reference when the spider closes."""
        if spider.name == 'bossUrl':
            self.client = None

