
from urllib import request
import re
import sys
import random
import os
import json

def find_all_pages_url(start_page_url):
    """Return the chapter's page URLs: the start page first, then every
    pagination link (``<a class="page-numbers" href="...">``) found on it.

    :param start_page_url: URL of the first page of a chapter.
    :return: list of page URLs, starting with ``start_page_url`` itself.
    """
    headers = {
        'User-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Mobile Safari/537.36'
    }

    # Fix: headers were previously built but never attached to the request,
    # so the site saw urllib's default User-Agent instead of the mobile one.
    req = request.Request(url=start_page_url, headers=headers)
    # urlopen's result is a context manager; use it so the socket is closed.
    with request.urlopen(req) as response:
        html = response.read().decode('utf-8')
    page_urls_regex = r'\<a\sclass=\"page-numbers\"\shref=\"(?P<page_url>[^\"]+)\"\>'
    page_urls = re.findall(page_urls_regex, html)
    return [start_page_url] + page_urls

def create_task(page_urls, comic_name):
    """Scrape every page of one chapter for image URLs and record them
    under ``task.json[comic_name][chapter_name]``.

    :param page_urls: page URLs for a single chapter; the first URL's final
        path segment (minus ``.html``) becomes the chapter name.
    :param comic_name: top-level key in ``task.json`` grouping the chapters.
    """
    # Chapter name = last path component of the first page URL, without ".html".
    chapter_name = re.findall(r'.+\/(?P<comic_name>.+)\.html', page_urls[0])[0]
    headers = {
        'User-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Mobile Safari/537.36'
    }
    img_urls = []
    # Anchors carry a query string after the image path; capture only the path.
    image_regex = r'<a\shref=\"(?P<img_url>[^\"]+?)\?.+\">'
    for page_url in page_urls:
        req = request.Request(url=page_url, headers=headers)
        # Context manager ensures the HTTP response is closed per page.
        with request.urlopen(req) as response:
            html = response.read().decode('utf-8')
        img_urls.extend(re.findall(image_regex, html))

    img_urls = list(map(replace_img, img_urls))
    print(img_urls)

    # Load the existing task index; start fresh on first run instead of
    # crashing when task.json does not exist yet.
    try:
        with open('./task.json', 'r', encoding='UTF8') as task_file:
            download_task = json.load(task_file)
    except FileNotFoundError:
        download_task = {}

    download_task.setdefault(comic_name, {})
    if img_urls:
        chapter_task = {}
        for count, img_url in enumerate(img_urls, start=1):
            # Prefer the numeric filename embedded in the URL; fall back to
            # the 1-based position when the URL doesn't end in "<digits>.webp".
            match = re.findall(r'\/(?P<image_name>\d+)\.webp$', img_url)
            image_name = match[0] if match and match[0] else str(count)
            chapter_task[image_name] = img_url
        download_task[comic_name][chapter_name] = chapter_task

    with open('./task.json', 'w', encoding='UTF8') as task_file:
        task_file.write(json.dumps(download_task, ensure_ascii=False))
    print('Finished index chapter:\t' + chapter_name)


def replace_img(url):
    """Rewrite a WordPress-CDN-proxied image URL to the direct image host."""
    proxied_prefix, direct_prefix = 'i0.wp.com/pic', 'img'
    return url.replace(proxied_prefix, direct_prefix)

def replace_site(url):
    """Swap the site's stale hostname for the current mirror.

    The site rotates domains frequently; only this mapping needs updating
    when it moves again.
    """
    old_host, new_host = 'www.4khd.com', 'xfgzk.ssuu.uk'
    return url.replace(old_host, new_host)



# start_page_urls = [
#     'https://www.4khd.com/content/14/xlevelvol005-yeha-aprons.html',
#     'https://slbhh.xxtt.ink/content/14/xlevelvol001-yeha-yeo-waxplay.html',
#     'https://slbhh.xxtt.ink/content/03/pure-media-vol277-yeha-expensive-and-delicious-naked-hen.html',
#     'https://slbhh.xxtt.ink/content/28/pure-media-vol265-part-01-yeha-hungry-bitch-in-the-stairwell.html',
#     'https://slbhh.xxtt.ink/content/28/pure-media-vol265-part-02-yeha-hungry-bitch-in-the-stairwell.html',
#     # 'https://slbhh.xxtt.ink/content/20/pure-media-vol260-part-01-yeha-stick-my-tail-in-the-hole.html',
#     # 'https://slbhh.xxtt.ink/content/20/pure-media-vol260-part-02-yeha-stick-my-tail-in-the-hole.html',
#     # 'https://slbhh.xxtt.ink/content/17/pure-media-vol255-part-01-yeha-geishas-invitation-epilogue.html',
#     # 'https://slbhh.xxtt.ink/content/17/pure-media-vol255-part-02-yeha-geishas-invitation-epilogue.html'
# ]
#
# start_page_urls = list(map(replace_site, start_page_urls))
# # print(start_page_urls)
#
# for start_page_url in start_page_urls:
#     page_urls = find_all_pages_url(start_page_url)
#     page_urls = list(map(replace_site, page_urls))
#     print(page_urls)
#     create_task(page_urls,'yeha')

# Chapter start pages to index on this run. Commented entries are chapters
# that were already processed on earlier runs.
start_page_urls = [
    # 'https://pagyx.uuss.uk/content/10/jvid-xiao-mi-en-yanyan-my-friends-sexy-sister.html',
    # 'https://pagyx.uuss.uk/content/28/jvid-angel-random-private-photos.html',
    'https://pagyx.uuss.uk/content/21/jvid-yanyan-angel-online-photo-shoot-model-shallow-rules-yanyan-was-forced-to-take-off-her-clothes-and-take-nude-photos-too-much-which-caused-the-ejaculation-master-to-think-of-her.html',
    'https://pagyx.uuss.uk/content/09/jvid-yanyan-angel-drunk-on-birthday-forced-sexual-assault-yanyan-was-taken-back-by-a-friend-and-stripped-naked-and-violated-her-legs-and-exposed-mei-bao-took-the-initiative-to-touch-her-penis.html',
    # 'https://pagyx.uuss.uk/content/07/jvid-yanyan-angel-flexible-tongue-yanyan.html',
    # 'https://pagyx.uuss.uk/content/09/jvid-angel-tiny-lily.html',
    # 'https://pagyx.uuss.uk/content/24/jvid-yanyan-angel-forced-restraint-of-m-attribute-girl-yanyan-goddess.html',
    # 'https://pagyx.uuss.uk/content/14/jvid-yanyan-angel-piaopiao-the-most-colored-girl-in-lily-gods-try-chenchens-special-guest-appearance-the-biggest-breakthrough.html',
    # 'https://pagyx.uuss.uk/content/24/jvid-yanyan-angel-is-trapped-in-the-wild-and-enters-the-rabbits-honey-hole.html',
    # 'https://pagyx.uuss.uk/content/24/jvid-yanyan-angel-e-sports-girl.html',
    '',
]

# Fix: drop blank entries while normalizing hostnames — an empty string
# previously reached urlopen() and raised "ValueError: unknown url type".
start_page_urls = [replace_site(url) for url in start_page_urls if url]
# print(start_page_urls)

for start_page_url in start_page_urls:
    page_urls = find_all_pages_url(start_page_url)
    page_urls = [replace_site(url) for url in page_urls]
    print(page_urls)
    create_task(page_urls, 'yanyan')