# -*- coding:utf8 -*-

import requests, bs4, os, lxml, threading, time

# Fetch the site's newest (highest) comment-page number.
def lastPage(url):
    """Return the latest comment-page number shown at *url* as an int.

    The page renders the current page as ``<span class="current-comment-page">``
    whose text looks like ``[123]``; the surrounding brackets are stripped.

    Raises requests.HTTPError on a non-2xx response instead of silently
    parsing an error page.
    """
    html = requests.get(url, timeout=30)  # timeout so a stalled server can't hang us
    html.raise_for_status()
    soup = bs4.BeautifulSoup(html.text, "lxml")
    elems = soup.select('span[class="current-comment-page"]')
    # Text is "[N]"; [1:-1] drops the brackets.  int() once is enough —
    # the original converted twice.
    return int(elems[0].getText()[1:-1])
# print(lastPage('http://jandan.net/ooxx'))

# Download every picture linked from the page at *url*.
def downloadPicture(url, path):
    """Download all images linked on the jandan page *url*.

    Each image is saved to ``path + <filename>`` — *path* is used as a plain
    string prefix, matching how the caller builds it.

    Image anchors on the page use protocol-relative hrefs (``//w...``),
    hence the ``http:`` prefix added below.
    """
    print('当前页面: {}'.format(url))

    html = requests.get(url, timeout=30)
    soup = bs4.BeautifulSoup(html.text, "lxml")
    elems = soup.select('a[href^="//w"]')

    for elem in elems:
        href = elem.get('href')
        if 'http' in href:
            # Already an absolute URL — skip, as the original did.
            continue
        pictureUrl = 'http:' + href

        # Use the URL's final path segment as the filename: a raw [-20:]
        # slice can contain '/', which would make open() fail.
        pictureName = pictureUrl.split('/')[-1]
        print('Downloading {}'.format(pictureName))

        # Request first (stream=True so the body is fetched chunk by chunk
        # instead of buffered whole in memory), and only open the output
        # file on success — the original opened the file before requesting,
        # leaving an empty file behind on a failed download.
        res = requests.get(pictureUrl, stream=True, timeout=30)
        with open(path + '{}'.format(pictureName), 'wb') as result:
            for chunk in res.iter_content(10000):
                result.write(chunk)

# downloadPicture('http://jandan.net/ooxx/page-11#comments', path)

if __name__ == '__main__':
    # Save into the current working directory.  The original used
    # os.path.basename(os.getcwd()) — just the folder *name*, no separator —
    # so 'path + filename' produced files like "myfolderabc.jpg" in the CWD.
    # Joining with '' yields the CWD path with a trailing separator, making
    # the same concatenation write real files into the directory.
    path = os.path.join(os.getcwd(), '')

    searchPath = 'http://jandan.net/ooxx/page-{}#comments'

    page = lastPage('http://jandan.net/ooxx')

    start = int(input('开始位置[1-{}]： '.format(page)))
    end = int(input('结束为止[1-{}]： '.format(page)))

    # Swap so the range is always ascending, then clamp to the valid pages.
    if start > end:
        start, end = end, start
    start = max(1, start)
    end = min(page, end)

    threads = []
    startTime = time.time()

    # One worker thread per page: downloads are I/O-bound, so the threads
    # overlap their network waits.
    for num in range(start, end + 1):
        url = searchPath.format(num)
        worker = threading.Thread(target=downloadPicture, args=[url, path])
        threads.append(worker)
        worker.start()

    # Wait for every page to finish before reporting the elapsed time.
    for th in threads:
        th.join()

    endTime = time.time()
    print('任务完成！ 耗时：{}'.format(endTime - startTime))


