import re
import requests
import os
import time


# Root directory under which album folders are created ("set path")
rootPath = './'

# Browser-like request headers so the GETs look like a normal Chrome visit
headers = {
"Connection": "keep-alive",
"Cache-Control": "max-age=0",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,en;q=0.9"
}

# Shared mutable state filled in by the crawler functions below
tasklist = []   # pagination "next page" URLs collected by taskloader()
albumList = []  # album page URLs collected by find_albums()
pageList = []   # per-album page URLs collected by find_each_pageNum()

# Tag-listing page the pagination walk in taskloader() starts from
starturl = 'http://www.nanrencd.com/tag/%e6%97%a0/'

def taskloader():
    """Walk the pagination chain starting at ``starturl``.

    Follows the "next page" link up to 19 times, appending each
    next-page URL to the module-level ``tasklist``.

    Fix vs. the original: when the regex finds no "next page" link
    (i.e. the last page has been reached), ``nextUrl[0][-1]`` used to
    raise IndexError; we now stop cleanly instead.
    """
    nextUrl = []
    for counter in range(1, 20):
        if counter == 1:
            curruntUrl = starturl
            print('now is the starturl')
        else:
            # Last tuple element of the first match is the next-page URL.
            curruntUrl = nextUrl[0][-1]
        print('currunt url is :', curruntUrl)
        html = open_url(curruntUrl)
        nextUrl = re.findall(r'''>(\d*?)</a><a href="(.*?)" class="next">下一页</a>''', html)
        if not nextUrl:
            # No "next page" link on this page — pagination is exhausted.
            print('no next page link found, stopping at task', counter)
            break
        tasklist.append(nextUrl[0][-1])
        print('Now task amount : ', counter)


# Request a URL and return its HTML body.
def open_url(url):
    """GET *url* (whitespace-stripped) with the module headers and
    return the response body as text."""
    response = requests.get(url.strip(), headers=headers)
    return response.text


# Collect the link of every album listed on a main (listing) page.
def find_albums(url):
    """Scrape *url* for album links, append each to the global
    ``albumList``, and return the links found on this page."""
    page_html = open_url(url)
    # Album anchors are marked with class="zoom" on the listing page.
    links = re.findall(r'''<a href="(.*?)" class="zoom" rel="bookmark" target="_blank" title="''', page_html)
    for link in links:
        albumList.append(link)
        print('cunrrunt album link : ', link)
    print('count : ', len(albumList))
    return links

# From an album's page numbers, find the link behind each page number.
def find_each_pageNum(url):
    """Scrape *url* for numbered-page links, append each link to the
    global ``pageList``, and return the list of links found.

    Fix vs. the original: it fell off the end and returned ``None``,
    which made the caller's ``picUrlList += find_each_pageNum(each)``
    raise TypeError; the local ``pagesLink`` list was also never used.
    We now populate and return it.
    """
    html = open_url(url)
    pagesLink = []
    each = re.findall(r'''<li><a href="(.*?)">(\d+?)</a></li>''', html)
    for match in each:
        # match is (href, page_number); we only need the href.
        link = match[0]
        pagesLink.append(link)
        pageList.append(link)
        print('cunrrunt page link : ', link)
    return pagesLink


# Scrape the album title (used later as the download folder name).
def find_pic_name(url):
    """Return the text of the first <h1> on the page at *url*.

    Prints the full list of <h1> matches before returning; raises
    IndexError if the page has no <h1>.
    """
    titles = re.findall(r'<h1>(.*?)</h1>', open_url(url))
    print(titles)
    return titles[0]

# From a page link, scrape, download and save the corresponding image.
def download_pic(url, name, path):
    """Download the content image found at *url* and save it as
    ``path + name + '.jpg'``, creating *path* if it does not exist.

    Fixes vs. the original:
    - 'path exists' was printed unconditionally, before the check;
    - the file was opened in 'ab' (append) mode, so re-running the
      script appended duplicate bytes and corrupted the image — 'wb'
      overwrites instead;
    - the file handle is now closed via a with-statement;
    - os.makedirs replaces os.mkdir so nested paths also work.
    """
    html = open_url(url)
    src = re.findall(r'''<img src="(.*?)" alt="''', html)
    # Index 1 skips the first <img> on the page — presumably a site
    # banner/logo rather than the content image; TODO confirm against
    # the live markup.
    src = src[1]
    img = requests.get(src, headers=headers)
    path = path.strip()
    if os.path.exists(path):
        print('path exists')
    else:
        os.makedirs(path)
        print('path made!')
    with open(path + name + '.jpg', 'wb') as f:
        f.write(img.content)
    print('image ', path, name, 'downloaded')


# Crawl up to 50 listing pages and collect album links into albumList,
# sleeping and skipping the page on transient connection errors.
# NOTE(review): range(50) starts at page 0 — the site's listing pages
# presumably start at 1; confirm whether /page/0 is valid.
for num in range(50):
    try:
        url = 'http://www.nanrencd.com/page/' + str(num)
        find_albums(url)

    except requests.exceptions.ConnectionError:
        print("Connection refused by the server..")
        print("Let me sleep for 5 seconds")
        time.sleep(5)
        print("Was a nice sleep, now let me continue...")
        continue
print('====All albums loaded====')

# Loop structure makes it easy to keep crawling after handled errors.
# For each collected album: resolve its title (folder name), gather its
# page URLs, then download every image; transient connection errors
# only skip the current album/image.
while True:
    print(albumList)
    print(len(albumList))
    time.sleep(10)
    for each in albumList:
        try:
            print('parsing depth2 ...', each)

            folderName = find_pic_name(each)
            # NOTE(review): backslash separator is Windows-only; with
            # rootPath = './' this yields mixed separators — consider
            # os.path.join for portability.
            folderPath = rootPath + folderName + '\\'

            picName = 1

            picUrlList = [each + '/1']
            # "or []" guards against a None return from
            # find_each_pageNum so the += cannot raise TypeError.
            picUrlList += find_each_pageNum(each) or []
        except requests.exceptions.ConnectionError:
            print("Connection refused by the server..")
            print("Let me sleep for 5 seconds")
            print("ZZzzzz...")
            time.sleep(5)
            print("Was a nice sleep, now let me continue...")
            continue

        for one in picUrlList:
            try:
                download_pic(one, str(picName), folderPath)
                picName += 1
            except requests.exceptions.ConnectionError:
                print("Connection refused by the server..")
                print("Let me sleep for 5 seconds")
                print("ZZzzzz...")
                time.sleep(5)
                print("Was a nice sleep, now let me continue...")
                continue

    # Fix vs. the original: this message sat after the break and was
    # unreachable; print it before leaving the loop.
    print('***JOB DONE***')
    break


# Manual tests:
# find_albums(open_url('http://www.nanrencd.com/page/1'))
# find_each_pageNum(open_url('http://www.nanrencd.com/192681.html'))
# download_pic(open_url('http://www.nanrencd.com/192681.html/3'),'3','./')
# find_pic_name(open_url('http://www.nanrencd.com/192681.html'))

# TODO: resume crawling from a checkpoint (breakpoint continuation)
# TODO: modularize so the crawler can adapt to different sites
# TODO: multithreading
# TODO: anti-anti-crawling strategies (rate limiting, proxies, etc.)
