# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy.http import Request
import logging
import json
from pictures.items import avatarItem

logger = logging.getLogger(__name__)

class gifspider(scrapy.Spider):
    """Spider that crawls avatar categories and picture lists from head.jcor.cn.

    Flow:
      1. ``start_requests`` fetches the category list for type codes 1 and 2.
      2. ``parse`` schedules the first 10 pages of every category.
      3. ``parse_info`` collects the picture URLs of one page into an
         ``avatarItem`` for the image pipeline.
    """

    name = "gifspider"
    cover = "gifspider"
    title = "gifspider"
    allowed_domains = ["head.jcor.cn", "cdn.jcor.cn"]
    start_urls = ["http://head.jcor.cn"]

    def start_requests(self):
        """Yield one category-list request per type code (1 and 2)."""
        for label in range(1, 3):
            logger.debug(f"##############{label}")
            # BUG FIX: the original used "${label}" inside the f-string, which
            # sent a literal dollar sign (typecode=$1). "{label}" is the
            # correct f-string placeholder.
            yield Request(
                url=f"http://head.jcor.cn/api/Avatar/GetAvatarTypeList?typecode={label}"
            )
        # Ad-hoc test request, kept for reference:
        #   yield Request(url=f"http://head.jcor.cn/api/Avatar/GetAvatarListByType?pageIndex=1&typeid=37",callback=self.parse_info,meta={'page':"1",'title':"测试"})

    def parse(self, response):
        """Parse the category list and schedule the first 10 pages of each.

        Each entry of the JSON ``Data`` array looks like::

            {"id": 6,
             "pic_url": "http://head.jcor.cn//Content/Images/avator/...png",
             "title": "男生",
             "typecode": 1,
             "create_time": "2021-06-21 10:32:43",
             "sort_id": 1}
        """
        dict_obj = response.json()
        logger.debug(f"{dict_obj['Data']}")
        # Crawl 10 pages for every category.
        for category in dict_obj['Data']:
            for page in range(1, 11):
                yield Request(
                    url=f"http://head.jcor.cn/api/Avatar/GetAvatarListByType?pageIndex={page}&typeid={category['id']}",
                    callback=self.parse_info,
                    meta={'page': page, 'title': category['title']},
                )

    def parse_info(self, response):
        """Collect all picture URLs of one result page into a single item.

        ``response.meta`` carries ``page`` (page index) and ``title``
        (category title) set by :meth:`parse`.
        """
        dict_obj = response.json()
        # BUG FIX: the original bound ``item = avatarItem()`` and then reused
        # the same name as the loop variable, so the loop clobbered the item
        # and the fields below were written onto the last raw API dict
        # instead of the avatarItem. Use a distinct name for the entries.
        imgs = [entry['pic_url'] for entry in dict_obj['Data']]

        item = avatarItem()
        item['type'] = "avatar"
        item['num'] = response.meta["page"]
        item['files'] = imgs
        item['image_names'] = [""]
        item['title'] = response.meta["title"]

        yield item


     
       