import re
import requests
import os

# Default HTTP headers for all search-page requests: a browser-like
# User-Agent so Baidu serves the normal JSON/HTML instead of blocking
# obvious script traffic.
header = {'user-agent': 'mozilla/5.0'}


def get_html_text(url):
    """Fetch *url* and return its decoded body text.

    Uses the module-level ``header`` dict (browser-like User-Agent) and a
    30-second timeout. Returns the empty string on any request failure so
    callers can treat "no page" and "empty page" uniformly.
    """
    try:
        r = requests.get(url, timeout=30, headers=header)
        r.raise_for_status()
        # apparent_encoding sniffs the real charset from the body; Baidu
        # pages often lack a reliable Content-Type charset.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; only network/HTTP errors should map to ''.
        return ''


def paser_html(html):
    """Extract every ``"ObjURL"`` value from a Baidu image-search page.

    *html* is the raw page/JSON text; returns the list of matched URL
    strings (still backslash-escaped, exactly as they appear in the source).
    """
    obj_url_re = re.compile(r'"ObjURL":"(.*?)"')
    return obj_url_re.findall(html)


def download(List):
    """Download each image URL in *List* into the local ``images/`` folder.

    Only ``.jpg``/``.jpeg``/``.gif`` URLs are fetched; everything else is
    skipped. Files that already exist are left untouched. Individual
    failures are logged-by-omission (skipped) so one bad URL doesn't stop
    the batch; Ctrl-C still aborts the whole run.
    """
    # The original code never created the target directory, so every
    # open() failed silently when images/ was missing.
    os.makedirs('images', exist_ok=True)
    for url in List:
        try:
            # Strip the JSON backslash-escapes FIRST; the original built
            # `path` from the still-escaped URL, producing filenames that
            # contained '\' fragments.
            url = url.replace('\\', '')
            if not url.strip().endswith(('.jpg', '.jpeg', '.gif')):
                continue
            path = 'images/' + url.split('/')[-1]
            if os.path.exists(path):
                print(path + ' image exists')
                continue
            r = requests.get(url, timeout=30)
            r.raise_for_status()
            with open(path, 'wb') as f:
                f.write(r.content)
            print(url + ' has download')
        except KeyboardInterrupt:
            exit()
        except Exception:
            # Best-effort batch: skip any URL that fails to fetch or save.
            continue


def get_more_url(num, word):
    """Build *num* paged Baidu image-search API URLs for keyword *word*.

    Page *x* (1-based) requests results starting at offset ``30 * x``,
    30 results per page. Returns the URLs as a list, in page order.
    """
    template = r'http://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={word}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&word={word}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&cg=girl&pn={pn}&rn=30'
    return [template.format(word=word, pn=30 * page) for page in range(1, num + 1)]


def main():
    """Interactive entry point: prompt for a count and keyword, then scrape.

    Downloads images from the first search result page, then from *n*
    additional paged API responses (30 results each).
    """
    n = int(input('下载(n*30)张图片: '))
    word = input('下载的图片关键字: ')

    # First page: the regular HTML search results.
    first_page = 'http://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1499773676062_R&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word={word}'.format(
        word=word)
    download(paser_html(get_html_text(first_page)))

    # Remaining pages: the JSON API, 30 results per request.
    for page_url in get_more_url(n, word):
        download(paser_html(get_html_text(page_url)))


# Guard the entry point so importing this module (e.g. to reuse
# get_html_text/download) no longer triggers an interactive scrape.
if __name__ == '__main__':
    main()


