# coding=utf-8

import json
import os
import re
import sys
import urllib
import urllib.parse  # explicit: `import urllib` alone does not load the submodule
import requests


# Root directory for all downloaded images; one sub-directory per keyword.
local_path = './star_images/'
# makedirs(exist_ok=True) is atomic with respect to the existence check,
# avoiding the check-then-create (TOCTOU) race of exists() + mkdir().
os.makedirs(local_path, exist_ok=True)


def get_onepage_urls(onepageurl):
    """Fetch one Baidu image-search result page and extract its image URLs.

    Args:
        onepageurl: URL of the result page; a falsy value means "no more
            pages" and ends the crawl.

    Returns:
        (pic_urls, fanye_url): the image URLs found on the page, and the
        absolute URL of the next page ('' when there is none).
    """
    if not onepageurl:
        print('执行结束')
        return [], ''
    try:
        # Timeout so a stalled connection cannot hang the crawl forever
        # (down_pic already uses timeout=15; keep the two consistent).
        html = requests.get(onepageurl, timeout=15).text
    except Exception as e:
        # Best-effort crawler: report the error and act as if the page
        # were empty so the caller's loop terminates cleanly.
        print(e)
        return [], ''
    # "objURL" holds the original image URL in Baidu's embedded JSON.
    pic_urls = re.findall('"objURL":"(.*?)",', html, re.S)
    # The "下一页" (next page) anchor carries a relative href.
    fanye_urls = re.findall(r'<a href="(.*)" class="n">下一页</a>', html)
    fanye_url = 'http://image.baidu.com' + fanye_urls[0] if fanye_urls else ''
    return pic_urls, fanye_url


def down_pic(pic_urls, pic_name, localPath):
    """Download every image URL in *pic_urls* into directory *localPath*.

    Failures are best-effort: a failed download is reported and skipped.

    Args:
        pic_urls: list of image URLs to fetch.
        pic_name: base name for the files (currently unused; files are
            simply numbered 1.jpg, 2.jpg, ...).
        localPath: destination directory, created if missing.
    """
    # exist_ok avoids the check-then-create race of the original code and
    # tolerates the directory already existing.
    os.makedirs(localPath, exist_ok=True)
    for i, pic_url in enumerate(pic_urls):
        try:
            pic = requests.get(pic_url, timeout=15)
            # Do not save HTTP error pages (404 bodies etc.) as .jpg files.
            pic.raise_for_status()
            string = str(i + 1) + '.jpg'
            # os.path.join is safe whether or not localPath ends with '/'.
            with open(os.path.join(localPath, string), 'wb') as f:
                f.write(pic.content)
            print('成功下载第%s张图片: %s' % (str(i + 1), str(pic_url)))
        except Exception as e:
            print('下载第%s张图片时失败: %s' % (str(i + 1), str(pic_url)))
            print(e)
            continue


def load_config():
    """Load the crawler configuration from config04.json.

    encoding='utf-8' is required: the file contains Chinese text and the
    platform default codec (e.g. gbk on Windows) would fail to decode it.

    Returns:
        The parsed JSON object (a dict expected to hold a 'stars' list).
    """
    # 'with' guarantees the file handle is closed; the original leaked it.
    with open("config04.json", encoding='utf-8') as f:
        return json.load(f)


if __name__ == '__main__':
    config = load_config()
    for star in config['stars']:
        keyword = star['keyword']
        url_init_first = star['url_init_first']
        print('keyword: {}'.format(keyword))
        print('url_init_first: {}'.format(url_init_first))

        # Seed the crawl with the first result page for this keyword.
        all_pic_urls = []
        first_page = url_init_first + urllib.parse.quote(keyword, safe='/')
        page_urls, fanye_url = get_onepage_urls(first_page)
        all_pic_urls.extend(page_urls)

        # Follow the "next page" links until a page yields neither
        # image URLs nor a further page.
        page_no = 0
        while True:
            page_urls, fanye_url = get_onepage_urls(fanye_url)
            page_no += 1
            print('第%s页' % page_no)
            if not fanye_url and not page_urls:
                break
            all_pic_urls.extend(page_urls)

        # De-duplicate before downloading; one sub-folder per keyword.
        down_pic(list(set(all_pic_urls)), keyword, local_path + '%s/' % keyword)

