# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import openpyxl
from scrapy.http import Request
import scrapy
from scrapy.pipelines.images import ImagesPipeline
from scrapy.pipelines.files import FilesPipeline
import re
import os


class picturesPipeline:
    """Collects per-item image metadata and exports it to an Excel sheet.

    Each processed item contributes one row (name, cover URL,
    comma-joined template image URLs); the workbook is written to disk
    once, when the spider closes.
    """

    def __init__(self):
        # Fresh in-memory workbook; the header row matches the columns
        # appended by process_item.
        self.wb = openpyxl.Workbook()
        self.ws = self.wb.active
        self.ws.title = '图片信息'
        self.ws.append(['名字', '封面', '模板'])

    def close_spider(self, spider):
        # Persist all accumulated rows in a single write at shutdown.
        self.wb.save('图片信息.xlsx')

    def process_item(self, item, spider):
        # Pull the expected fields up front so a malformed item is
        # detected before any partial side effects happen.
        try:
            name = item['name']
            cover = item['cover']
            images = item['images']
        except KeyError as err:
            # Previously a blanket `except Exception: pass` swallowed
            # every error AND implicitly returned None, which drops the
            # item for any later pipeline. Now only missing fields are
            # tolerated, and the item is always passed through.
            print('picturesPipeline: missing field', err)
            return item

        print('名字\t', name)
        print('封面\t', cover)
        print('模板图片\t', images)
        joined = ','.join(images)
        print('------------------------------\n')
        self.ws.append([name, cover, joined])
        return item





class imgsPipeLine(ImagesPipeline):
    """Downloads an item's template images plus its cover image.

    Overrides the two ImagesPipeline hooks: get_media_requests schedules
    the downloads, and file_path decides where each image is stored
    (instead of the default hash-based filename).
    """

    def get_media_requests(self, item, info):
        # "avatar"-type items carry a page number used to keep file
        # names unique across pages; other types leave it empty.
        # (pic 分类下就是模板，名字用了截取 分页没问题;
        #  avatar 分类下 名字是索引 分页有问题 — hence the num prefix.)
        num = ''
        if item['type'] == "avatar":
            num = item['num']
        names = item['image_names']
        for index, url in enumerate(item['images']):
            yield Request(
                url=url,
                meta={
                    'url': url,
                    'num': num,
                    'type': item['type'],
                    'title': item['title'],
                    # image_names may be shorter than images; fall back
                    # to an empty name so file_path uses the URL basename.
                    'name': names[index] if index < len(names) else "",
                    'index': index,
                },
            )
        # Also store one copy of the cover image.
        try:
            if item['cover']:
                # BUG FIX: the original reused item['images'][index] for
                # the meta 'url' and 'name' here — a copy-paste from the
                # loop above. That raised NameError when 'images' was
                # empty (index never bound) and handed file_path the
                # wrong URL/extension for the cover. Use the cover URL.
                yield Request(
                    url=item['cover'],
                    meta={
                        'url': item['cover'],
                        'num': num,
                        'type': item['type'],
                        'title': item['title'],
                        'name': "",
                        'index': '_cover',
                    },
                )
        except KeyError:
            # Item has no 'cover' field at all — best-effort, skip it.
            print("cover null")

    def file_path(self, request, response=None, info=None, *, item=None):
        """Build the on-disk path for one downloaded image.

        Names containing a 《bookmark》 get their own sub-folder with
        .jpg files; all other images keep the source URL's extension
        (e.g. .gif).
        """
        filename = request.meta['name']
        index = request.meta['index']
        title = request.meta['title']
        item_type = request.meta['type']  # renamed: don't shadow builtin `type`
        num = request.meta['num']
        url = request.meta['url']
        # Extract any 《...》 bookmark from the supplied name.
        result = re.findall(r"《(.*?)》", filename)
        if not result:
            # No bookmark: keep the original extension (may be a gif).
            _, ext = os.path.splitext(os.path.basename(url))
            return f"{item_type}/{title}/{num}{index}{ext}"
        return f"{item_type}/{title}/{result[0]}/{index}.jpg"
class GifPipeline(FilesPipeline):
    """Downloads an item's files (gif/animated assets) via FilesPipeline.

    Mirrors imgsPipeLine but reads URLs from item['files'] and has no
    cover handling. file_path replaces the default hash-based name.
    """

    def get_media_requests(self, item, info):
        # "avatar"-type items carry a page number used to keep file
        # names unique across pages; other types leave it empty.
        num = ''
        if item['type'] == "avatar":
            num = item['num']
        names = item['image_names']
        for index, url in enumerate(item['files']):
            yield Request(
                url=url,
                meta={
                    'url': url,
                    'num': num,
                    'type': item['type'],
                    'title': item['title'],
                    # image_names may be shorter than files; fall back
                    # to an empty name so file_path uses the URL basename.
                    'name': names[index] if index < len(names) else "",
                    'index': index,
                },
            )

    def file_path(self, request, response=None, info=None, *, item=None):
        """Build the on-disk path for one downloaded file.

        Names containing a 《bookmark》 get their own sub-folder with
        .jpg files; all other files keep the source URL's extension.
        """
        filename = request.meta['name']
        index = request.meta['index']
        title = request.meta['title']
        item_type = request.meta['type']  # renamed: don't shadow builtin `type`
        num = request.meta['num']
        url = request.meta['url']
        # Extract any 《...》 bookmark from the supplied name.
        result = re.findall(r"《(.*?)》", filename)
        if not result:
            # No bookmark: keep the original extension (typically .gif).
            # (dropped the unused `target_name` binding from splitext)
            _, ext = os.path.splitext(os.path.basename(url))
            return f"{item_type}/{title}/{num}{index}{ext}"
        return f"{item_type}/{title}/{result[0]}/{index}.jpg"
