import requests
from bs4 import BeautifulSoup as BS
import re 
import urllib.request
import urllib.parse
import os
import json
import csv
from datetime import datetime
import time
def UrlManager():
    """Return the 21 paged work-list API URLs (page=0 .. page=20).

    The endpoint is identical except for the ``page`` query parameter, so
    each URL is built from a template instead of regex-substituting into a
    copy of the page-1 URL (``re.sub('page=1', ...)`` was fragile: it would
    rewrite every literal occurrence of 'page=1' anywhere in the string).
    """
    template = ('https://apps.game.qq.com/cgi-bin/ams/module/ishow/V1.0/query/'
                'workList_inc.cgi?activityId=2735&sVerifyCode=ABCD&sDataType=JSON'
                '&iListNum=20&totalpage=0&page={page}&iOrder=0&iSortNumClose=1'
                '&jsoncallback=&iAMSActivityId=51991&_everyRead=true&iTypeId=2'
                '&iFlowId=267733&iActId=2735&iModuleId=2735&_=')
    return [template.format(page=page) for page in range(21)]
def HtmlDownload(url):
    """Fetch *url* and return the response body decoded as UTF-8 text.

    Returns None when the request fails at the network level or the server
    answers with a non-200 status; callers must handle the None case.
    """
    try:
        # Without a timeout requests can block forever on a dead connection,
        # stalling the whole crawl on a single bad page.
        r = requests.get(url, timeout=10)
    except requests.RequestException:
        # One unreachable page should not abort the entire run.
        return None
    if r.status_code == 200:
        r.encoding = 'utf-8'
        return r.text
    return None
def HtmlParse(r):
    """Extract image URLs from a work-list API response body.

    The response embeds percent-encoded URLs; each match is decoded,
    rewritten to the ``sProdImgNo_7`` variant, and collected into a set
    (deduplicated). Returns a set of decoded URL strings.

    Fix: the regex patterns are now raw strings — the original
    ``'(http(%\\w*)*)'`` contained the invalid escape sequence ``\\w``
    inside a plain string literal (a SyntaxWarning on modern Python).
    """
    image_urls = set()
    # findall with two groups yields (full_match, last_group) tuples;
    # only the full percent-encoded URL is needed.
    for match in re.compile(r'(http(%\w*)*)').findall(r):
        decoded = urllib.parse.unquote(match[0], 'utf-8')
        decoded = re.sub(r'sProdImgNo_[0-9]', 'sProdImgNo_7', decoded)
        # '200' -> '0' swaps the thumbnail size suffix for the full-size one.
        # NOTE(review): this replaces EVERY '200' in the URL, not just the
        # suffix — fragile if '200' appears elsewhere in a path; confirm
        # against real API responses before tightening.
        image_urls.add(re.sub('200', '0', decoded))
    return image_urls
def DataOut(image_ur,num):
   if num:
        n=0
        path="F://King_honor/"+str(num)+'/'
        print(path)
        for i in image_ur:
            with open(path+str(n)+'.jpg','wb') as f:
                r=requests.get(i)
                if r.status_code==200:
                    f.write(r.content)
              
                    n+=1

def SpiderMan():
    """Placeholder for a crawl orchestrator; currently unused — the
    ``__main__`` block below drives the crawl directly."""
    pass

if __name__ == "__main__":
    # Crawl every paged URL: ensure the per-page output directory exists,
    # download the page, extract image URLs, and save the images.
    for num, page_url in enumerate(UrlManager()):
        # exist_ok avoids the isdir-then-makedirs race of the original.
        os.makedirs("F://King_honor/" + str(num) + '/', exist_ok=True)
        page_html = HtmlDownload(page_url)
        if page_html is None:
            # HtmlDownload returns None on failure; the original passed
            # that None straight into HtmlParse and crashed (TypeError).
            continue
        image_url = HtmlParse(page_html)
        DataOut(image_url, num)
        print('完成{}的下载'.format(num))