# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import html
import re

from itemadapter import ItemAdapter

class Zuoye23Pipeline:
    """Scrapy item pipeline that appends each item to ``database.html``.

    Each item is rendered as one single-row HTML table; the file is used as
    a lightweight store in place of a real database.
    """

    def process_item(self, item, spider):
        """Write ``item`` to ``database.html`` and return it unchanged.

        Expected item fields (all rendered into the row):
        ``bianhao`` (project number), ``laiyuan`` (collection source),
        ``title``, ``url`` (detail page link), ``shijian`` (tender time),
        ``key`` (search keyword). Missing fields raise ``KeyError``, same
        as subscripting did before.
        """

        def _esc(field):
            # Coerce to str so non-string values (e.g. None) don't raise
            # TypeError on concatenation, and HTML-escape so markup
            # characters in scraped text can't break the table structure.
            return html.escape(str(item[field]))

        row = (
            '<table style="width:100%;margin-bottom:15px;" border=1>'
            '<tr>'
            '<td>项目编号:' + _esc('bianhao') + '</td>'
            '<td>采集来源:' + _esc('laiyuan') + '</td>'
            '<td>采集标题:' + _esc('title') + '</td>'
            '<td>来源网址:' + _esc('url') + '</td>'
            '<td>' + _esc('shijian') + '</td>'
            '<td>搜索关键字:' + _esc('key') + '</td>'
            '</tr>'
            '</table>\n'
        )
        # Append mode: one row per item, file accumulates across the crawl.
        with open('database.html', 'a', encoding='utf-8') as f:
            f.write(row)
        print('*=' * 100)  # visual separator in the console log, as before
        return item
