# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import csv
import json
import os
import time

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class CrawlannualreportsPipeline:
    """Persist every scraped item to three sibling files under ./download:
    a JSON array, a CSV table, and a human-readable text log.

    All three files share one timestamp in their names so they form a
    recognizable set for a single crawl run.
    """

    def open_spider(self, spider):
        """Create the output directory and open the three output files."""
        # Create the output directory up front so open() cannot fail on a
        # missing folder; os.path.join keeps the paths portable (the original
        # used hard-coded Windows backslashes).
        out_dir = "download"
        os.makedirs(out_dir, exist_ok=True)

        # One timestamp shared by all three files (the original called
        # time.time() once per file, producing three mismatched names).
        base = os.path.join(out_dir, f"output_{time.time()}")

        self.json_file = open(f"{base}.json", "w", encoding="utf-8")
        self.json_file.write("[\n")
        # True until the first element is written; used to place the comma
        # separator *before* each subsequent element so the array stays
        # valid JSON (the original left a trailing comma before "]").
        self._json_first = True

        self.csv_file = open(f"{base}.csv", "w", encoding="utf-8", newline="")
        self.csv_writer = csv.writer(self.csv_file)
        # First column holds the item's title; the original header repeated
        # "SecName" twice and never named the title column.
        self.csv_writer.writerow(["Title", "SecName", "SecCode", "AdjunctUrl", "DownURL", "FileName"])

        self.txt_file = open(f"{base}.txt", "w", encoding="utf-8")

    def process_item(self, item, spider):
        """Write *item* to the JSON, CSV, and text outputs; return it unchanged."""
        # Save to the JSON file: separator before every element after the
        # first, so close_spider can terminate the array cleanly.
        if not self._json_first:
            self.json_file.write(",\n")
        self._json_first = False
        json.dump(dict(item), self.json_file, ensure_ascii=False)

        # Save to the CSV file (column order matches the header row).
        self.csv_writer.writerow([
            item["title"], item["sec_name"], item["sec_code"],
            item["adjunct_url"], item["down_url"], item["file_name"],
        ])

        # Save to the plain-text log.
        self.txt_file.write(f"Title: {item['title']}\n")
        self.txt_file.write(f"SecName: {item['sec_name']}\n")
        self.txt_file.write(f"SecCode: {item['sec_code']}\n")
        self.txt_file.write(f"AdjunctUrl: {item['adjunct_url']}\n")
        self.txt_file.write(f"DownURL: {item['down_url']}\n")
        self.txt_file.write(f"FileName: {item['file_name']}\n")
        self.txt_file.write("-" * 50 + "\n")

        return item

    def close_spider(self, spider):
        """Terminate the JSON array and close all three output files."""
        self.json_file.write("\n]\n")
        self.json_file.close()

        self.csv_file.close()
        self.txt_file.close()
