#!/usr/bin/env python3

import os
import sys
from bs4 import BeautifulSoup
import requests

# Korean proxy. If requests time out, switch to a different proxy.
proxies = {'http':'112.175.151.10:8080'}

url = 'http://weheartit.com/inspirations/taylorswift?scrolling=true&page='

pic_prefix = 'http://data.whicdn.com/images/'


def get_pic_name(url):
    """Build a local filename from an image URL.

    The URL looks like .../images/<id>/<thumb>.<ext>; the result is
    '<id>.<ext>' so files are keyed by the image id, not the thumb name.
    """
    *_, image_id, basename = url.split('/')
    extension = basename.rsplit('.', 1)[-1]
    return '%s.%s' % (image_id, extension)


def get_pics_url(url):
    """Fetch one listing page and collect its thumbnail images.

    Returns a list of dicts with keys 'url' (the image src) and 'name'
    (local filename derived via get_pic_name). Returns [] on any
    network/HTTP error instead of raising, so the caller's page loop
    can continue with the next page.
    """
    try:
        webdata = requests.get(url, proxies=proxies, timeout=10)
        # raise_for_status() is a no-op on 2xx, so no separate
        # status_code check is needed.
        webdata.raise_for_status()
    except Exception as e:
        print("Error: download fail: %s." % e)
        return []

    soup = BeautifulSoup(webdata.text, 'lxml')

    # One entry per thumbnail; the unused `idx` counter from the
    # original loop has been dropped.
    return [
        {'url': img.get('src'), 'name': get_pic_name(img.get('src'))}
        for img in soup.select('img.entry-thumbnail')
    ]


def download_image(path, url):
    """Download `url` to `path`, skipping files that already exist.

    Returns True on success (or if the file was already present),
    False on a download or write error. A partially-written file is
    removed on failure.
    """
    print('downloading %s to %s ... ' % (url, path), end='', flush=True)
    if os.path.exists(path):
        print('exist.')
        return True

    try:
        # timeout prevents hanging forever on a dead proxy/server.
        pic = requests.get(url, stream=True, proxies=proxies, timeout=10)
        pic.raise_for_status()
    except Exception as e:
        print("Error: download fail: %s." % e)
        return False

    try:
        with open(path, 'wb') as f:
            # Stream to disk in chunks; reading .content would load the
            # whole image into memory and defeat stream=True.
            for chunk in pic.iter_content(chunk_size=8192):
                f.write(chunk)
        print('success.')
        return True
    except Exception as e:
        print("save file fail: %s." % e)
        if os.path.exists(path):
            os.remove(path)
        print('fail.')
        return False


# Crawl pages 1-19 of the feed and save every thumbnail into ./images/.
prefix = os.path.join('.', 'images', '')
os.makedirs(prefix, exist_ok=True)
for page in range(1, 20):
    for pic in get_pics_url(url + str(page)):
        download_image(prefix + pic['name'], pic['url'])

