# -*- coding: utf-8 -*-
import os
import re
import requests
from bs4 import BeautifulSoup

# EOF occurred in violation of protocol (_ssl.c:600)
# solve: pip install -U requests[security]
# pip install -U requests[socks]

# When True, every HTTP request is routed through the SOCKS5 proxy below.
# Rebound in the __main__ block; module-level code reads it via request_get().
use_proxy = True

# Browser-like request headers so the site serves the normal HTML pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/52.0.2743.82 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Upgrade-Insecure-Requests': '1'}

# Local SOCKS5 proxy for both schemes; requires `pip install requests[socks]`
# (see the note at the top of the file).
proxies = {"http": "socks5://127.0.0.1:1080", 'https': 'socks5://127.0.0.1:1080'}


# Here "one_page" means one pagination page of the gallery, not a single
# image; a single image is called "pic" in this script.
def download_one_page(url, save_path, start_pic_no=1):
    """Download every picture listed on one gallery pagination page.

    Args:
        url: URL of the pagination page to scrape.
        save_path: Directory under which a per-book subdirectory (named after
            the sanitized gallery title) is created.
        start_pic_no: Number given to the first picture on this page, so file
            names keep counting across pagination pages.
    """
    html = request_get(url).text
    soup = BeautifulSoup(html, 'lxml')
    gdtms = soup.find_all(class_='gdtm')  # one .gdtm element per thumbnail
    # Replace characters that are illegal in Windows file names.
    title = re.sub(r"[/\\:*?\"<>|]", "-", soup.h1.get_text())
    print(title + ' total:' + str(len(gdtms)) + 'p')
    # makedirs(exist_ok=True) removes the check-then-create race of the
    # original exists()/mkdir() pair and also creates missing parent dirs.
    book_dir = os.path.join(save_path, title)
    os.makedirs(book_dir, exist_ok=True)
    for cur_pic_no, gdtm in enumerate(gdtms, start=start_pic_no):
        pic_page_url = gdtm.a.get('href')
        pic_file_name = str(cur_pic_no) + '.' + get_book_image_format()
        pic_file_full_path = os.path.join(book_dir, pic_file_name)
        if os.path.exists(pic_file_full_path):
            print(pic_file_name + ' existed, pass...')
            continue
        print('Downloading... ' + pic_file_name + ' ', end='')
        try:
            # get_pic_url moved inside the try: in the original it ran outside
            # and any scrape/network failure aborted the whole page.
            pic_url = get_pic_url(pic_page_url)
            save_file(pic_url, pic_file_full_path)
        except Exception as e:
            print('fail! ' + pic_page_url)
            print(e)
        else:
            print('ok!')
    # Matches the original's range print: end bound is one past the last pic.
    print(f'[{start_pic_no:d}-{start_pic_no + len(gdtms):d}] finish!')


def save_file(file_url, path):
    """Fetch *file_url* and write the raw response body to *path*."""
    body = request_get(file_url).content
    with open(path, 'wb') as out:
        out.write(body)
        out.flush()


def get_pic_url(pic_page_url):
    """Extract the direct image URL from one picture's viewer page.

    Raises:
        Exception: if the page contains no element with id="img".
    """
    page = request_get(pic_page_url).text
    img = BeautifulSoup(page, 'lxml').find(id="img")
    if img is not None:
        return img['src']
    raise Exception(f'no image in {pic_page_url}')


def request_get(url, timeout=30):
    """GET *url* with the module headers, optionally through the SOCKS proxy.

    Args:
        url: URL to fetch.
        timeout: Seconds before the request is aborted. requests applies NO
            timeout unless one is passed, so the original could hang forever
            on a stalled connection; the default keeps callers unchanged
            while bounding the wait.

    Returns:
        The requests.Response object.
    """
    # proxies=None is equivalent to omitting the argument entirely.
    return requests.get(url, headers=headers,
                        proxies=proxies if use_proxy else None,
                        timeout=timeout)


# Get the number of pagination pages of a gallery.
def get_page_size(book_url, per_page_size):
    """Return how many pagination pages the gallery at *book_url* has.

    Scrapes the first ".gdt2" cell whose text looks like "<N> pages" for the
    total picture count, then divides by *per_page_size* (rounding up).
    Falls back to 1 page when no count can be parsed, matching the original
    behavior.
    """
    html = request_get(book_url).text
    soup = BeautifulSoup(html, 'lxml')
    pic_total = 0
    for td in soup.find_all(class_='gdt2'):
        ret = re.match(r'(\d+)\s*pages', td.get_text())
        if ret is not None:
            pic_total = int(ret.group(1))
            break
    # Ceiling division. The original `total // per + 1` requested one extra,
    # empty page whenever total was an exact multiple of per_page_size.
    return max(1, -(-pic_total // per_page_size))


def download_book(url, save_path):
    """Download an entire gallery, walking every pagination page in order."""
    per_page_size = 40  # pictures shown per pagination page
    page_size = get_page_size(url, per_page_size)
    print('Total: ' + str(page_size) + ' webPages')
    for page_no in range(page_size):
        # Page 0 is the bare gallery URL; later pages take a ?p= parameter.
        page_url = url if page_no == 0 else url + '?p=' + str(page_no)
        download_one_page(page_url, save_path, 1 + page_no * per_page_size)


def download_book_with_retry(book_url, save_path, retry_time=3, retry_period=2):
    """Download a book, retrying on failure.

    Fixes the original retry logic, which was doubly broken: the inner
    `while True`/`continue` retried forever (ignoring *retry_time*), and on
    a success the outer `for` re-ran the whole download *retry_time* times.
    Now the book is attempted at most *retry_time* times and the function
    returns on the first success.

    Args:
        book_url: Gallery URL passed through to download_book.
        save_path: Target directory passed through to download_book.
        retry_time: Maximum number of attempts.
        retry_period: Seconds to sleep between attempts.
    """
    import time
    for _attempt in range(retry_time):
        try:
            download_book(book_url, save_path)
        except Exception as e:
            time.sleep(retry_period)
            print('\nRetry book task... error:%s' % e)
        else:
            return
    print('Give up after %d attempts.' % retry_time)


def get_book_image_format():
    """File extension used when naming saved pictures."""
    return "jpg"


if __name__ == '__main__':
    # Route all traffic through the local SOCKS proxy (module-level config).
    use_proxy = True
    download_book_with_retry('https://e-hentai.org/g/1783285/c1f2ef9f15/',
                             'img/')
