﻿#!/usr/bin/env python
#-*-coding:utf-8-*-
from  urllib import parse,request
import re
import os
''' acg 在线图集 在线壁纸 目录浏览'''
# Browser-like headers so acg12.com serves the normal (mobile) pages
# instead of rejecting the scripted client.
headers = {'Content-Type': 'application/x-www-form-urlencoded','User-Agent': 'Mozilla/5.0 (Linux; Android 4.3; Nexus 7 Build/JSS15Q) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2307.2 Mobile Safari/537.36'}
# NOTE(review): 'global' at module scope is a no-op in Python; the
# 'global index' declarations inside functions are what matter.
global index
# Running counter used by save() to generate unique output file names.
index=0
def seach(v):
    """Fetch listing page *v* of the acg12 pixiv-daily category and
    download every album linked from it.

    v -- 1-based listing page number; page 1 is served at the bare
         category URL, later pages under /page/N/.

    Retries the HTTP fetch up to 5 times, scrapes album links with a
    regex, then hands the set of links to down().
    """
    base = 'https://acg12.com/category/pixiv/pixiv-daily/'
    url = base if v == 1 else base + 'page/' + str(v) + '/'
    print(url)
    req = request.Request(url=url, headers=headers)
    page = ''
    for _ in range(5):
        try:
            page = request.urlopen(req, timeout=20).read().decode('utf-8')
            break
        except Exception:
            # Transient network error: report and retry (up to 5 attempts).
            print(v, '页错误')
    links = set(re.findall(r'<a href="(https://acg12.com/[0-9]+?/)"', page))
    print(links)
    print(len(links))
    down(links, v)
def down(list, pa):
    """Download each album page URL in *list* and save its images under
    E:/tuku/acg_ri/<pa>/.

    list -- iterable of album-page URLs (as scraped by seach())
    pa   -- listing page number, used as the output subdirectory name

    Returns the (empty) input unchanged when there is nothing to do.
    """
    album_urls = list  # rebind: avoid using the shadowed builtin below
    if len(album_urls) < 1:
        return album_urls
    # Output directory is the same for the whole listing page; create once.
    path = 'E:/tuku/acg_ri/' + str(pa) + '/'
    if not os.path.exists(path):
        os.makedirs(path)
    # Optional scheme, then //static... host.  The original pattern used
    # the character class [https:]? which matches at most ONE character,
    # not an optional "https:" prefix.
    img_re = re.compile(r'<img src="((?:https?:)?//static.+?)"')
    for url in album_urls:
        req = request.Request(url=url, headers=headers)
        page = ''
        for _ in range(5):
            try:
                page = request.urlopen(req, timeout=20).read().decode('utf-8')
                break
            except Exception:
                # Transient network error: report and retry.
                print(url, '错误')
        sp = set(img_re.findall(page))
        print(sp)
        print(len(sp))
        save(sp, path)
def save(list, path):
    """Download every image URL in *list* into directory *path*.

    Files are named with the global running counter plus the URL's
    last-4-character extension (so only 3-letter extensions such as
    .jpg/.png pass the filter; .jpeg/.webp are skipped, as before).

    Fixes over the original:
    - scheme check used v[1:2] (the SECOND character), which wrongly
      prepended "http:" to absolute http(s) URLs; now startswith('http')
    - the output file was opened before the download succeeded, leaving
      zero-byte files when all 5 attempts failed; now written only on
      success
    """
    global index
    if len(list) < 1:
        return
    for v in list:
        # Keep only URLs ending in a dot + 3-char extension.
        if v[-4:-3] != '.':
            continue
        # Protocol-relative '//static...' links need a scheme added.
        if not v.startswith('http'):
            v = 'http:' + v
        index = index + 1
        req = request.Request(url=v, headers=headers)
        data = None
        for _ in range(5):
            try:
                data = request.urlopen(req, timeout=20).read()
                break
            except Exception:
                print('下载中')
        if data is None:
            # All retries failed: skip (counter value stays consumed,
            # matching the original numbering behavior).
            continue
        with open(path + str(index) + v[-4:], "wb") as f:
            f.write(data)
        print(v)

if __name__ == '__main__':
    # Crawl listing pages 23 through 122 (100 pages in total).
    for page_no in range(23, 123):
        seach(page_no)
