# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import csv

from itemadapter import ItemAdapter

import openpyxl
import urllib.request

# def getHtml(url):
#     html = urllib.request.urlopen(url,timeout=10).read()
#     return html
# # The timeout parameter works around ConnectionResetError on slow responses
# def saveHtml(file_name,file_content):
#     with open(file_name.replace('/','_')+'.csv','wb') as f:
#         f.write(file_content)
#
#
# class BaiduGfPipeline:
#     def process_item(self, item, spider):
#         name = ''.join(item['title'])
#         html = getHtml(item['visit_url'][0])  # fetch the page content for this URL
#         saveHtml(name, csv)  # save the response content to disk (NOTE: passes the csv module, not `html` — bug in this dead code)
#         print("Finished crawling this page")
#         return item

class EXcelPipeline:
    """Scrapy pipeline that collects scraped (title, url) pairs into an
    Excel workbook and saves it when the spider closes.

    Expects items exposing ``get`` with ``'title'`` and ``'visit_url'``
    keys (plain dicts or Scrapy Items both qualify).
    """

    def __init__(self):
        # One workbook per crawl; write the header row up front.
        self.wb = openpyxl.Workbook()          # workbook
        self.ws = self.wb.active               # active worksheet
        self.ws.title = '爬取百度'
        self.ws.append(('标题', '网址'))

    def close_spider(self, spider):
        # Persist the workbook once, after the spider has finished.
        # (Parameter was previously misspelled as `spride`.)
        self.wb.save('百度搜索数据.xlsx')

    def process_item(self, item, spider):
        # Use the .get() defaults so an item missing a field yields ''
        # instead of raising KeyError. (The original computed these
        # locals and then ignored them, indexing the item directly.)
        title = item.get('title', '')          # page title
        visit_url = item.get('visit_url', '')  # page URL
        self.ws.append((str(title), str(visit_url)))
        return item
