import requests
import os
import time
from bs4 import BeautifulSoup

# Toggle to route all HTTP traffic through the SOCKS5 proxy defined below.
use_proxy = False

# Browser-like request headers so the target site serves the normal HTML page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/52.0.2743.82 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Upgrade-Insecure-Requests': '1'}

# Local SOCKS5 proxy endpoint; only consulted when use_proxy is True.
proxies = {"http": "socks5://127.0.0.1:1080", 'https': 'socks5://127.0.0.1:1080'}

def request_get(url):
    """GET *url* with the shared browser-like headers, optionally through
    the module-level SOCKS proxy when use_proxy is set."""
    kwargs = {'headers': headers}
    if use_proxy:
        kwargs['proxies'] = proxies
    return requests.get(url, **kwargs)

def save_file(file_url, path):
    """Download *file_url* and write the raw response bytes to *path*."""
    data = request_get(file_url).content
    with open(path, 'wb') as out:
        out.write(data)
        out.flush()

def get_urls_names(text):
    """Parse one URL per non-blank line of *text*.

    The file name for each URL is the part after its last '/' (the whole
    line when no '/' is present). Returns the pair (urls, names).
    """
    stripped = [line.strip() for line in text.splitlines()]
    urls = [line for line in stripped if line]
    names = [url[url.rfind('/') + 1:] for url in urls]
    return (urls, names)

def spider_page(url):
    """Fetch a pool page and return (title, imgurls_block).

    *imgurls_block* is the raw text of the <pre> element inside the
    'modal-body' div, expected to hold one image URL per line.
    """
    soup = BeautifulSoup(request_get(url).text, 'lxml')
    imgurls_block = soup.find(class_='modal-body').pre.get_text()
    title = soup.h1.get_text()
    return title, imgurls_block

def download_images(imglist, namelist, save_dir):
    """Download each URL in *imglist* into *save_dir* as '<index>_<name>'.

    Files that already exist are skipped, so an interrupted run can be
    resumed without re-downloading. Indexing starts at 1 to match the
    original naming scheme.
    """
    # enumerate replaces the previous hand-rolled `i = 0; i += 1` counter.
    for i, (imgurl, name) in enumerate(zip(imglist, namelist), start=1):
        fpath = '%s/%d_%s' % (save_dir, i, name)
        if os.path.exists(fpath):
            continue  # already downloaded on a previous run
        print(imgurl)
        save_file(imgurl, fpath)
        # time.sleep(1)

def webp_to_jpg(dir_path):
    """Convert every .webp file in *dir_path* to JPEG, deleting the original.

    RGBA images are flattened onto a white background first, because JPEG
    has no alpha channel. NOTE: the os.chdir side effect is kept for
    backward compatibility with callers that rely on it.
    """
    os.chdir(dir_path)
    from PIL import Image
    files = get_files_fullpath(os.getcwd(), '.webp')
    for filename in files:
        im = Image.open(filename)
        if im.mode == "RGBA":
            im.load()
            background = Image.new("RGB", im.size, (255, 255, 255))
            background.paste(im, mask=im.split()[3])  # alpha channel as paste mask
            # BUG FIX: save the flattened copy; saving the RGBA original as
            # JPEG raises OSError ("cannot write mode RGBA as JPEG").
            im = background
        # BUG FIX: only replace the extension; str.replace('webp', 'jpg')
        # also rewrote 'webp' appearing anywhere in the path or file name.
        save_name = os.path.splitext(filename)[0] + '.jpg'
        im.save(save_name, 'JPEG')
        os.remove(filename)

def get_files_fullpath(dir_path, suffix=''):
    """Return full paths of the regular files in *dir_path*.

    When *suffix* is non-empty, only files whose name ends with it are
    kept. Directory-listing order is preserved.
    """
    result = []
    for entry in os.listdir(dir_path):
        full = os.path.join(dir_path, entry)
        if not os.path.isfile(full):
            continue
        # Keep only files with the requested suffix (when one is given).
        if suffix != '' and not entry.endswith(suffix):
            continue
        result.append(full)
    return result

def mkdir(dir_path):
    """Create *dir_path* (and any missing parents) if it does not exist.

    Uses os.makedirs with exist_ok=True, which avoids the check-then-create
    race of the previous os.path.exists() + os.mkdir() pair and no longer
    fails when an intermediate directory is missing.
    """
    os.makedirs(dir_path, exist_ok=True)

def format_title(title):
    """Strip characters that are illegal in Windows/NTFS file names.

    The original removed only '?', '*' and '/'; this extends the set to
    the full Windows-reserved characters \\ / : * ? " < > | so that
    mkdir(title) cannot fail on a title containing any of them. Behavior
    is unchanged for titles that only contain the old three characters.
    """
    return title.translate(str.maketrans('', '', '\\/:*?"<>|'))


if __name__ == '__main__':
    # Scrape one pool page, then download its images and convert webp -> jpg.
    title, imgurls_block = spider_page('https://gal.voiux.com/pools/post/10086')
    save_dir = format_title(title)
    mkdir(save_dir)
    urls, names = get_urls_names(imgurls_block)
    download_images(urls, names, save_dir)
    webp_to_jpg(save_dir)
