# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class HciPipeline:
    """Scrapy item pipeline that writes each scraped article to its own
    numbered text file (``001.txt``, ``002.txt``, ...) under ``path``.

    NOTE(review): files are opened in ``'w'`` mode, so re-running the spider
    overwrites earlier output, and the target directory must already exist —
    confirm this matches the intended workflow.
    """

    # Next file number; incremented once per successfully written item.
    index = 1
    # Output directory (relative to the process working directory).
    path = '../data/'

    def process_item(self, item, articles):
        """Persist one scraped item to '<path><NNN>.txt' and return it as a dict.

        The item must provide 'title', 'date', 'abstract', 'url' (strings)
        and 'author' (an iterable); a missing key raises KeyError, which
        Scrapy surfaces as a pipeline error for that item.
        """
        # Zero-pad to at least three digits: 1 -> '001', 42 -> '042',
        # 123 -> '123', 1000 -> '1000' (same results as the old if/elif chain).
        file_name = f'{self.index:03d}'
        items = dict(item)
        # Go through self.path (instead of repeating the literal) so the
        # output directory is defined in exactly one place and can be
        # overridden per instance, e.g. in tests.
        with open(self.path + file_name + '.txt', 'w', encoding='utf-8') as f:
            f.write('title: ' + items['title'] + '\n')
            f.write('author: ' + ','.join(str(i) for i in items['author']) + '\n')
            f.write('date: ' + items['date'] + '\n')
            f.write('abstract: ' + items['abstract'] + '\n')
            f.write('url: ' + items['url'] + '\n')
        # Increment only after a successful write so a failed item does not
        # consume a file number.
        self.index += 1
        return items