# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import os

# Skeleton HTML page used by OschinaPipeline: the scraped fragment is
# substituted into {content} via str.format before being written to disk.
html_template = """
        <!DOCTYPE html>
        <html lang="en">
        <head>
            <meta charset="UTF-8">
        </head>
        <body>
        {content}
        </body>
        </html>
        """


class CrawlDemoPipeline(object):
    """Default no-op pipeline: hands every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform for the demo spider; pass the item along
        # so any later pipelines in ITEM_PIPELINES still receive it.
        return item


class OschinaPipeline(object):
    """Persist each scraped OSChina item as a standalone HTML file.

    Expects items with 'url' and 'content' keys. The content is wrapped
    in the module-level ``html_template`` and written to
    ``./htmls/<last-url-path-segment>.html``.
    """

    def process_item(self, item, spider):
        url = item['url']
        content = item['content']

        # Build the output file name from the last URL path segment.
        # Strip a trailing '/' first so 'http://x/y/' still yields 'y.html'
        # instead of a bare '.html'.
        file_name = url.rstrip('/').split('/')[-1] + '.html'
        out_dir = os.path.join(os.path.abspath('.'), 'htmls')
        # Create the target directory if missing; the original crashed with
        # FileNotFoundError on a fresh checkout.
        os.makedirs(out_dir, exist_ok=True)
        file_path = os.path.join(out_dir, file_name)

        html = html_template.format(content=content)
        # 'w' (not 'w+': we never read back) with utf-8 to match the
        # template's <meta charset="UTF-8"> declaration.
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(html)

        # Pipelines must return the item so downstream pipelines
        # (e.g. JsonWriterPipeline) keep receiving it.
        return item


class JsonWriterPipeline(object):
    """Append every item to ``items.json`` as one JSON object per line (JSON Lines)."""

    def __init__(self):
        # Explicit utf-8: ensure_ascii=False below emits raw non-ASCII
        # characters, which would break on a non-UTF-8 platform default.
        self.file = open('items.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Scrapy invokes this when the spider finishes; without it the
        # handle leaked and buffered lines could be lost on shutdown.
        self.file.close()


class OutputPipeline(object):
    """Debug pipeline: print each item to stdout and pass it along."""

    def process_item(self, item, spider):
        print("item==>", item)
        # Returning the item is required by the pipeline contract; the
        # original returned None, which silently dropped the item for
        # every pipeline ordered after this one.
        return item
