import time
import shutil
import urllib
import subprocess
from bs4 import BeautifulSoup

from playwright.sync_api import sync_playwright
from base_function import *

# Directory holding one metadata JSON file per blog post.
BLOG_DIR="blog_dir/"
# Scratch directory used for debug HTML dumps.
TEMP_DIR = "temp_dir/"
# JSON file with the site's category tree and URL -> category map.
CLASS_PATH = "download_class.json"
# JSON file mapping blog URLs to their download status codes (see __main__ legend).
BLOG_PATH = "download_blog.json"
HOME_URL = "https://www.nicezzz.com/"
# Thunder (Xunlei) downloader output directory.
XUNLEI_DIR="E:/迅雷下载/"
# Browser download output directory.
BROWSER_DIR="E:/浏览器下载/"
# Staging directory where archives are extracted before being moved into place.
EXTRACT_DIR=XUNLEI_DIR+"解压文件夹/"
# Browser profile directories: index 0 = portable Chrome, index 1 = Edge.
USER_DATA_DIR = ["C:/GoogleChromePortable/Data/profile/", "C:/Users/wyh/AppData/Local/Microsoft/Edge/User Data/"]
# Browser executables: index 0 = portable Chrome, index 1 = Edge.
EXECUTABLE_PATH = ["C:/GoogleChromePortable/App/Chrome-bin/Chrome.exe", "C:/Program Files (x86)/Microsoft/Edge/Application/msedge.exe"]


def download_nicezzz_class():
    """Scrape the site's navigation menu and save the category tree.

    Writes CLASS_PATH with two maps:
      "分类": category name -> list of sub-category names
      "链接": category URL  -> "category" or "category/sub-category" label
    """
    result = {"分类": {}, "链接": {}}
    with sync_playwright() as playwright:  # sync_playwright() must be used as a context manager
        browser = playwright.chromium.launch_persistent_context(
            user_data_dir=USER_DATA_DIR[0], executable_path=EXECUTABLE_PATH[0],
            channel="chrome", headless=False, args=["--start-maximized"], no_viewport=True)
        page = browser.new_page()
        page.goto(HOME_URL, wait_until="commit")
        while True:
            soup = BeautifulSoup(page.content(), features="lxml")
            nav = soup.find("ul", attrs={"class": "nav navbar-nav"})
            if nav is None:
                continue  # navigation not rendered yet; re-read the DOM
            for index, item in enumerate(nav.find_all("li", recursive=False)):
                if index == 0:  # first entry is the home link, not a category
                    continue
                anchor = item.find("a")
                category = anchor.contents[0]
                result["分类"][category] = []
                submenu = item.find("ul")
                if submenu is None:
                    # Category without sub-menu: map its own link directly.
                    result["链接"][anchor["href"]] = category
                else:
                    for sub_anchor in submenu.find_all("a"):
                        sub_name = sub_anchor.contents[0]
                        result["分类"][category].append(sub_name)
                        result["链接"][sub_anchor["href"]] = category + "/" + sub_name
            save_json(result, CLASS_PATH)
            return


def dowload_nicezzz_blog(index=1, update_download=0):
    """Crawl the paginated blog index and record every blog URL in BLOG_PATH.

    index: page number to start from (1 = home page).
    update_download: 0 = initial full crawl; 1 = incremental update that stops
    after `repeat_number1` consecutive pages yield no new blogs.
    New URLs are stored with status 0 (download link not fetched yet).
    """
    json_data2 = read_json(BLOG_PATH)
    repeat_number1=3 # when updating, check the first 3 pages; stop if none has a new blog
    with sync_playwright() as playwright:  # playwright = sync_playwright() alone is not usable; the context manager is required
        browser = playwright.chromium.launch_persistent_context(user_data_dir=USER_DATA_DIR[0], executable_path=EXECUTABLE_PATH[0], channel="chrome", headless=False)
        page = browser.new_page()
        while index>0:
            flag=0  # becomes 1 when the current page contains at least one new blog URL
            if index==1:
                url = HOME_URL
            else:
                url = HOME_URL + "page/" + str(index)
            print(url)
            try:
                page.goto(url, wait_until="commit")
            except:
                time.sleep(10)
                page.reload()
            repeat_number2 = 0  # re-read attempts for pages showing fewer than 20 posts
            while 1:
                html_data = page.content()
                soup1 = BeautifulSoup(html_data, features="lxml")
                soup2 = soup1.find("div", attrs={"class": "f404"})
                if soup2 is not None:
                    # A 404 page means we walked past the last index page.
                    print("已全部下载完成")
                    return
                soup2 = soup1.find("div", attrs={"class": "posts-row ajaxpager tab-pane fade in active"})
                if soup2 is not None:
                    soup3 = soup2.find_all("div", attrs={"class":"item-thumbnail"})
                    if len(soup3)==20:  # a fully rendered index page holds exactly 20 posts
                        if update_download==0: # initial crawl
                            repeat_url = []
                            for soup4 in soup3:
                                url = soup4.a["href"]
                                print(url)
                                if url not in json_data2:
                                    flag=1
                                    json_data2[url] = 0
                                else:
                                    repeat_url.append(url)
                            if len(repeat_url)>0:
                                # Duplicates during an initial crawl are suspicious; ask before continuing.
                                print("出现重复项："+str(repeat_url))
                                input("是否继续下载：")
                        else: # incremental update
                            temp_data = {}
                            for soup4 in soup3:
                                url = soup4.a["href"]
                                # print(url)
                                if url not in json_data2:
                                    flag=1
                                    temp_data[url] = 0
                            if flag==1:
                                print(temp_data)
                            # Merge so that the newly found URLs come first.
                            temp_data.update(json_data2)
                            json_data2 = temp_data
                        save_json(json_data2, BLOG_PATH)
                        break
                    else: # fewer than 20 posts even after refreshing — mainly the last page
                        repeat_number2+=1
                        if repeat_number2>3:
                            # Probe the next page: if it 404s, the current short page is the final one.
                            url = HOME_URL + "page/" + str(index+1)
                            page.goto(url, wait_until="commit")
                            time.sleep(10)
                            html_data = page.content()
                            soup1 = BeautifulSoup(html_data, features="lxml")
                            soup2 = soup1.find("div", attrs={"class": "f404"})
                            if soup2 is not None:
                                repeat_url = []
                                for soup4 in soup3:
                                    url = soup4.a["href"]
                                    print(url)
                                    if url not in json_data2:
                                        flag=1
                                        json_data2[url] = 0
                                    else:
                                        repeat_url.append(url)
                                if len(repeat_url)>0:
                                    print("出现重复项："+str(repeat_url))
                                    input("是否继续下载：")
                                save_json(json_data2, BLOG_PATH)
                                print("已全部下载完成")
                                return
                            else:
                                repeat_number2 = 0
            if update_download==0 or flag==1:
                index+=1
            elif update_download==1 and flag==0 and repeat_number1>1:
                # No new blog on this page; spend one of the remaining retry pages.
                index+=1
                repeat_number1-=1
            else:
                break
    pass


def get_dowload_link(context, middle_link):
    """Resolve a direct download link by walking intermediate shortener pages.

    context: Playwright browser context used to open new pages.
    middle_link: candidate intermediate URLs, tried in order.
    Returns (dowload_result, dowload_link): 1 plus the direct link on success;
    0 or a negative error code (see the legend in __main__) and possibly an
    empty string otherwise.
    """
    page = context.new_page()
    dowload_result = 0
    dowload_link = ""
    for url in middle_link:
        # print("\r{} ".format(url), end="")
        while 1:
            # Keep retrying navigation until it succeeds or the host rejects us outright.
            try:
                page.goto(url, wait_until="commit", timeout=300000)
                break
            except Exception as e:
                if "ERR_HTTP_RESPONSE_CODE_FAILURE" in str(e):
                    dowload_result=-120
                    print("\r{} : 当前无法使用此页面。".format(url))
                    break
                else:
                    print(e)
                # Recreate the page after a failed navigation and retry.
                page.close()
                time.sleep(15)
                page = context.new_page()
        while dowload_result!=-120:
            # Poll the rendered page and classify what we got.
            try:
                html_data = page.content()
            except:
                time.sleep(3)
                continue
            save_html(html_data, "temp_dir/temp.html")
            soup1 = BeautifulSoup(html_data, features="lxml")
            # Cloudflare-style "you have been blocked" page.
            soup2 = soup1.find("h1", attrs={"data-translate":"block_headline"})
            if soup2 is not None:
                s = "Sorry, you have been blocked"
                if soup2.text==s:
                    dowload_result+=-1
                    print("\r{} : 对不起，您已被屏蔽。您无法访问shrinkme.org。".format(url))
                    break
            # Premium-only file notice.
            soup2 = soup1.find("div", attrs={"class":"col text-center mt-4"})
            if soup2 is not None:
                if "此檔案僅限購買高級帳戶的高級用戶下載" in soup2.text:
                    dowload_result+=-10
                    print("\r{} : 此档案仅限购买高级帐户的高级用户下载。".format(url))
                    break
            # Redirected to a captcha wall we cannot pass.
            if "https://stfly.xyz/" in page.url or "https://anhdep24.com/" in page.url:
                dowload_result+=-100
                print("\r{} : 安全验证失败，无法通过谷歌服务器获取验证码。".format(url))
                break
            # Chromium's own "site can't be reached" error page: reload up to 30 times.
            soup2 = soup1.find("span", attrs={"jsselect": "heading"})
            if soup2 is not None:
                n = 30
                if "This site can’t be reached" in soup2.text:
                    while n>0:
                        n-=1
                        print("This site can’t be reached")
                        try:
                            time.sleep(30)
                            page.reload()
                            break
                        except:
                            if "https://anhdep24.com/" in page.url:
                                break
                elif "无法访问此网站" in soup2.text:
                    while n>0:
                        n-=1
                        print("{} : 无法访问此网站".format(url))
                        try:
                            time.sleep(30)
                            page.reload()
                            break
                        except:
                            if "https://anhdep24.com/" in page.url:
                                break
                if n==0:
                    # Still unreachable after all retries; give up entirely.
                    return 0, ""
            # Plain HTTP error pages.
            soup2 = soup1.find("h1")
            if soup2 is not None:
                if "404 Not Found" in soup2.text or "400 Bad Request" in soup2.text:
                    dowload_result=-130
                    print("\r{} : 链接已失效。".format(url))
                    break
            # File-host connection-security interstitials.
            soup2 = soup1.find("div", attrs={"class":"core-msg spacer"})
            if soup2 is not None:
                if "rosefile.net 需要首先检查您的连接安全性。" in soup2.text:
                    dowload_result=-140
                    print("\r{} : rosefile.net 需要首先检查您的连接安全性。".format(url))
                    return dowload_result, url
                elif "www.77file.com 需要首先检查您的连接安全性。" in soup2.text:
                    dowload_result=-145
                    print("\r{} : www.77file.com 需要首先检查您的连接安全性。".format(url))
                    return dowload_result, url
            # File deleted by its owner.
            soup2 = soup1.find("div", attrs={"class":"info_box_msg"})
            if soup2 is not None:
                if "此文件已被用户删除，暂时无法访问！" in soup2.text:
                    dowload_result=-150
                    print("\r{} : 此文件已被用户删除，暂时无法访问！".format(url))
                    break
            # Gateway errors (504 / 502).
            soup2 = soup1.find("span", attrs={"class":"inline-block"})
            if soup2 is not None:
                if "Gateway time-out" in soup2.text:
                    dowload_result=-160
                    print("\r{} : Gateway time-out, Error code 504".format(url))
                    break
                elif "Bad gateway" in soup2.text:
                    dowload_result=-165
                    print("\r{} : Bad gateway, Error code 502".format(url))
                    break
            # ouo.io-style shortener: submit the form, then keep clicking "Get Link".
            soup2 = soup1.find("span", attrs={"class": "desc"})
            if soup2 is not None:
                temp_url = "https:" + soup2.find("a")["href"]
                page.goto(temp_url, wait_until="commit")
                page.get_by_role("button", name="Submit").click()
                time.sleep(3)
                n1 = len(context.pages)
                while 1:
                    while "https://ouo.io/go/" in page.url or "https://ouo.press/go/" in page.url:
                        try:
                            # Close any pop-up tab the shortener opened.
                            n2 = len(context.pages)
                            if n2>n1:
                                context.pages[n2-1].close()
                            html_data = page.content()
                            save_html(html_data, "temp_dir/temp.html")
                            page.get_by_role("button", name="Get Link").first.click()
                            break
                        except Exception as e:
                            # Click into the page to dismiss the ad overlay, then retry.
                            page.mouse.move(10, 10)
                            page.mouse.down()
                            pass
                    try:
                        html_data = page.content()
                    except Exception as e:
                        if "https://www.mediafire.com/download_repair.php" in page.url:
                            dowload_result=-170
                            print("\r{} : Mediafire : generating new download key".format(url))
                            return dowload_result, url
                    if "https://www.mediafire.com/error.php" in page.url:
                        dowload_result=-175
                        print("\r{} : Mediafire : Well, looks like we can’t go any further".format(url))
                        return dowload_result, url
                    if "/error/302.html" in page.url:
                        dowload_result=-176
                        print("\r{} : Sorry, TeraBox is not available in current area".format(url))
                        return dowload_result, url
                    soup1 = BeautifulSoup(html_data, features="lxml")
                    soup2 = soup1.find("a", attrs={"class": "input popsok"})
                    if soup2 is not None:
                        # Final download anchor found behind the shortener.
                        dowload_link = soup2["href"]
                        print(dowload_link)
                        return 1, dowload_link
            if "https://gadget.pernahsukses.com/" in page.url:
                dowload_result=-185
                print("\r{} : 错误链接：https://gadget.pernahsukses.com/".format(url))
                return dowload_result, ""
            # Final download anchor reached directly (no shortener loop needed).
            soup2 = soup1.find("a", attrs={"class": "input popsok"})
            if soup2 is not None:
                dowload_link = soup2["href"]
                print(dowload_link)
                return 1, dowload_link
    page.close()
    return dowload_result, dowload_link


def extract_nicezzz_file(file_path, download_dir, extract_password=""):
    """Extract an archive into EXTRACT_DIR and move the result to download_dir.

    When no password is given, the two passwords the site commonly uses are
    tried in turn. Returns 1 on success, a negative value on failure.
    NOTE(review): extract_file's return convention is defined elsewhere —
    here any value below 1 (after the -1 adjustment) is treated as failure.
    """
    if extract_password == "":
        # No explicit password supplied: try the known site passwords.
        status = extract_file(file_path, EXTRACT_DIR, "sssins.com")
        if status < 0:
            status = extract_file(file_path, EXTRACT_DIR, "vx666878787")
    else:
        status = extract_file(file_path, EXTRACT_DIR, extract_password)
    status -= 1
    if status >= 0:
        # Move the single extracted entry into the target directory and
        # remove the now-redundant archive.
        entries = os.listdir(EXTRACT_DIR)
        shutil.move(EXTRACT_DIR + entries[0], download_dir + entries[0])
        os.remove(file_path)
        return 1
    print("文件解压失败："+file_path)
    return status


def dowload_nicezzz_single_file(blog_url):
    """Scrape one blog page, save its metadata JSON, and start the download.

    Connects to a Chrome instance over CDP (starting one when needed),
    extracts title / extraction password / tags / intermediate links from the
    blog page, resolves the direct link via get_dowload_link, and hands it to
    Thunder on success. Returns the resolution status code.
    """
    key = get_url_key(blog_url)
    json_path = BLOG_DIR + key + ".json"
    json_data = {"链接":blog_url, "标题":"", "密码":"", "标签":[], "下载":""}
    with sync_playwright() as playwright:  # playwright = sync_playwright() alone is not usable; the context manager is required
        while 1:
            # Attach to the debugging Chrome; (re)start it whenever the connection fails.
            try:
                browser = playwright.chromium.connect_over_cdp("http://127.0.0.1:9666")
                break
            except Exception as e:
                if check_chrome_statue():
                    os.system("taskkill /f /im chrome.exe")
                command = r"C:/GoogleChromePortable/App/Chrome-bin/Chrome.exe --remote-debugging-port=9666 --user-data-dir=C:/GoogleChromePortable/Data/profile/"
                subprocess.Popen(command)
                time.sleep(10)
        # Keep only the first context and its first page.
        while len(browser.contexts)>1:
            browser.contexts[1].close()
        context = browser.contexts[0]
        while len(context.pages)>1:
            context.pages[1].close()
        try:
            page = context.new_page()
            page.goto(blog_url, wait_until="commit")
        except Exception as e:
            # Opening the page failed: restart Chrome once and retry.
            if check_chrome_statue():
                os.system("taskkill /f /im chrome.exe")
            command = r"C:/GoogleChromePortable/App/Chrome-bin/Chrome.exe --remote-debugging-port=9666 --user-data-dir=C:/GoogleChromePortable/Data/profile/"
            subprocess.Popen(command)
            time.sleep(10)
            browser = playwright.chromium.connect_over_cdp("http://127.0.0.1:9666", timeout=300000)
            context = browser.contexts[0]
            page = context.new_page()
            page.goto(blog_url, wait_until="commit")
        while 1:
            html_data = page.content()
            save_html(html_data, "temp_dir/temp.html")
            soup1 = BeautifulSoup(html_data, features="lxml")
            soup2 = soup1.find("h1", attrs={"class":"article-title"})
            if soup2 is not None:
                # Title: drop one leading space and normalize full-width slashes.
                text = soup2.text
                if text[0]==" ":
                    text= text[1:]
                text = text.replace("／", "-")
                json_data["标题"]=text
            soup2 = soup1.find("div", attrs={"class":"theme-box wp-posts-content"})
            if soup2 is not None:
                # Extraction password: the text after the full-width colon, first line only.
                for soup3 in soup2:
                    if "解压密码" in soup3.text:
                        text1 = soup3.text
                        text2 = text1[text1.find("：")+1:]
                        if "\n" in text2:
                            text2 = text2[:text2.find("\n")]
                        json_data["密码"] = text2
                        break
                soup3 = soup1.find("div", attrs={"class":"theme-box article-tags"})
                if soup3 is None:
                    # Tag block not rendered yet; re-read the page.
                    continue
                soup3 = soup3.find_all("a")
                for soup4 in soup3:
                    json_data["标签"].append(soup4.text.replace("# ", ""))
                # Collect candidate intermediate download links from the buttons.
                soup3 = soup1.find_all("button", attrs={"class":"custom-btn btn-12"})
                temp_urls = []
                for soup4 in soup3:
                    url = soup4.find("a")["href"]
                    if "http" in url and url not in temp_urls and soup4.text!="GOGOGO":
                        temp_urls.append(url)
                if len(temp_urls) ==0:
                    print("下载链接不存在")
                    dowload_result, dowload_link = -300, ""
                else:
                    dowload_result, dowload_link = get_dowload_link(context, temp_urls)
                break
        browser.close()
    json_data["下载"]=dowload_link
    save_json(json_data, json_path)
    if dowload_result>0: # hand the direct link to Thunder for download
        xunlei_download(dowload_link, key)
    return dowload_result


def dowload_nicezzz_file(download_dir):
    """Run the full download pipeline over BLOG_PATH.

    1. Extract archives of finished downloads, move them into place and
       create category/tag shortcuts.
    2. Delete or reclassify leftover files from failed downloads.
    3. Fetch download links for pending blogs and hand them to Thunder.

    download_dir: [root, files/, categories/, tags/] directory list.
    Returns 1 when disk space runs low, 0 when the pending queue is processed.
    """
    json_data1 = read_json(CLASS_PATH)
    json_data2 = read_json(BLOG_PATH)
    urls1 = []  # status 1 / 1.5: link fetched, download presumably finished
    urls2 = []  # status 0: download link not fetched yet
    for url in json_data2:
        if json_data2[url]==0: # link not fetched yet  or json_data2[url]==-140
            urls2.append(url)
        elif json_data2[url]==1 or json_data2[url]==1.5: # link fetched, not yet extracted
            urls1.append(url)
    # Extract downloaded archives
    print("解压文件：")
    number1 = len(urls1)
    if number1==0:
        print("无")
    save_dir = [XUNLEI_DIR, BROWSER_DIR]
    for i, url in enumerate(urls1):
        name1 = get_url_key(url)
        temp_path1 = BLOG_DIR + name1 + ".json"
        temp_data = read_json(temp_path1)
        flag = 0
        for temp_dir in save_dir:
            for name in os.listdir(temp_dir):
                # Strip "(1)"/"(2)" duplicate-download suffixes before matching.
                if name[-3:]=="(1)" or name[-7:]=="(1).htm":
                    temp_name = name
                    name = name.replace("(1)", "")
                    os.rename(temp_dir+temp_name, temp_dir+name)
                elif name[-3:]=="(2)" or name[-7:]=="(2).htm":
                    temp_name = name
                    name = name.replace("(2)", "")
                    os.rename(temp_dir+temp_name, temp_dir+name)
                if name==name1 or name==name1+".7z" or name==name1+".zip" or name==name1+".rar":
                    flag = 1
                    temp_path2 = temp_dir+name
                    names = os.listdir(EXTRACT_DIR)
                    if len(names)==0:
                        result = extract_file(temp_path2, EXTRACT_DIR, temp_data["密码"])
                    else:
                        raise Exception("解压文件夹已存在文件")
                    if result==-1:
                        flag = -1
                        print("文件解压失败："+url)
                        json_data2[url]=-200
                        shutil.rmtree(EXTRACT_DIR)
                        create_dir(EXTRACT_DIR)
                    else:
                        # Rename the extracted folder to "[key] title" and move it into place.
                        names = os.listdir(EXTRACT_DIR)
                        path1 = EXTRACT_DIR+names[0]
                        name2 = names[0].replace("／", "-")
                        if name1 not in name2:
                            name3 = "[{}] {}".format(name1, name2)
                        else:
                            name3=name2
                        path2 = EXTRACT_DIR + name3
                        os.rename(path1, path2)
                        shutil.move(path2, download_dir[1])
                        path3 = download_dir[1] + name3
                        # Shortcut under the matching category directory.
                        key1 = name1[:name1.rfind("-")].replace("-", "/")
                        for temp_url in json_data1["链接"]:
                            if key1 in temp_url:
                                path4 = "{}{}/{}.lnk".format(download_dir[2], json_data1["链接"][temp_url], name3)
                                create_shortcut(path3, path4)
                        # One shortcut per tag.
                        for tag in temp_data['标签']:
                            path4 = download_dir[3] + tag
                            create_dir(path4)
                            path4 = "{}/{}.lnk".format(path4, name3)
                            create_shortcut(path3, path4)
                        os.remove(temp_path2)
                        json_data2[url]=2
                    save_json(json_data2, BLOG_PATH)
                    break
        if flag==0:
            print("{}/{} {} 还未下载完成".format(number1, i+1, url))
        elif flag==1:
            print("{}/{} {} 解压成功".format(number1, i+1, url))
    # Remove leftovers from failed downloads
    for temp_dir in save_dir:
        for name in os.listdir(temp_dir):
            if ".htm" in name:
                # An .htm file means the download link returned an error page; re-queue the blog.
                name1 = "https://www.nicezzz.com/archives/"+name.replace("-", "/")+"l"
                name1 = name1.replace("korean/realgraphic", "korean-realgraphic").replace("ai/girls", "ai-girls").replace("espacia/korea", "espacia-korea").replace("moon/night/snap", "moon-night-snap")
                if name1 in json_data2:
                    print(name)
                    json_data2[name1]=0
                    save_json(json_data2, BLOG_PATH)
                    file_path = temp_dir+name
                    os.remove(file_path)
            elif "." not in name:
                # Extension-less file: tiny ones are error pages, larger ones are real archives.
                name1 = "https://www.nicezzz.com/archives/"+name.replace("-", "/")+".html"
                name1 = name1.replace("korean/realgraphic", "korean-realgraphic").replace("ai/girls", "ai-girls").replace("espacia/korea", "espacia-korea").replace("moon/night/snap", "moon-night-snap")
                if name1 in json_data2:
                    print(name)
                    file_path = temp_dir+name
                    file_size = os.path.getsize(file_path)
                    if file_size<50000:
                        json_data2[name1]=0
                        os.remove(file_path)
                    else:
                        json_data2[name1]=1
                    save_json(json_data2, BLOG_PATH)
    # Fetch download links and hand them to Thunder
    print("下载链接：")
    number1 = len(json_data2)
    number2 = len(urls2)
    index1 = number1 - number2 + 1
    index2 = 0
    min_capacity = 10*1024*1024*1024  # stop when less than 10 GiB is free
    while index2<number2:
        total_capacity, used_capacity, free_capacity = shutil.disk_usage("E:/")
        if free_capacity<min_capacity:
            print("磁盘空间不足")
            return 1
        url = urls2[index2]
        print("{}/{} {}".format(number1, index1, url))
        dowload_result = dowload_nicezzz_single_file(url)
        json_data2[url] = dowload_result
        save_json(json_data2, BLOG_PATH)
        index1+=1
        index2+=1
        if index2%50==0:
            # Restart Chrome periodically to avoid memory build-up.
            os.system("taskkill /f /im chrome.exe")
    return 0


def browser_dowload_nicezzz_file():
    """Download pending files (status 1) through the browser instead of Thunder.

    Attaches to the debugging Chrome, opens each blog's saved download link,
    waits for the final download anchor, and saves the file into BROWSER_DIR.
    Successfully handled entries are marked 1.5 (browser-downloaded).
    Returns 1 when disk space runs low.
    """
    if check_chrome_statue():
        os.system("taskkill /f /im chrome.exe")
    else:
        command = r"C:/GoogleChromePortable/App/Chrome-bin/Chrome.exe --remote-debugging-port=9666 --user-data-dir=C:/GoogleChromePortable/Data/profile/"
        subprocess.Popen(command)
        time.sleep(5)

    json_data1 = read_json(BLOG_PATH)
    min_capacity = 10*1024*1024*1024  # stop when less than 10 GiB is free
    dir1 = "C:/Users/wyh/AppData/Local/Temp/"  # Playwright's download staging area
    with sync_playwright() as playwright:  # playwright = sync_playwright() alone is not usable; the context manager is required
        browser = playwright.chromium.connect_over_cdp("http://127.0.0.1:9666")
        context = browser.contexts[0]
        page = context.pages[0]
        for url in json_data1:
            if json_data1[url]==1: # link fetched, download not finished
                total_capacity, used_capacity, free_capacity = shutil.disk_usage("E:/")
                if free_capacity<min_capacity:
                    print("磁盘空间不足")
                    return 1
                print(url)
                name1 = get_url_key(url)
                temp_path1 = BLOG_DIR + name1 + ".json"
                temp_data = read_json(temp_path1)
                try:
                    page.goto(temp_data["下载"], wait_until="commit")
                    html_data = ""  # fix: previously unbound when the first content() call raised
                    while 1:
                        try:
                            html_data = page.content()
                        except:
                            pass
                        save_html(html_data, "temp_dir/temp.html")
                        soup1 = BeautifulSoup(html_data, features="lxml")
                        soup2 = soup1.find("a", attrs={"class": "input popsok"})
                        if soup2 is not None:
                            break
                    with page.expect_download() as download_info:
                        page.get_by_role("link", name="Download file").click()
                        download = download_info.value
                    print("文件正在拷贝中")
                    download.save_as(BROWSER_DIR+name1)
                except:
                    # Automatic download failed: let the user finish it manually,
                    # then move the artifact out of Playwright's temp directory.
                    input("请确保文件已下载完成：")
                    for name in os.listdir(dir1):
                        if "playwright-artifacts-" in name:
                            dir2 = dir1 + name + "/"
                            names1 = os.listdir(dir2)
                            if len(names1)==1:
                                path1 = dir2 + names1[0]  # fix: names1[1] raised IndexError on a single-entry list
                                path2 = dir2 + name1
                                os.rename(path1, path2)
                                shutil.move(path2, BROWSER_DIR)
                # Clear Playwright's download cache.
                for name in os.listdir(dir1):
                    if "playwright-artifacts-" in name:
                        dir2 = dir1 + name
                        shutil.rmtree(dir2)
                json_data1[url] = 1.5
                save_json(json_data1, BLOG_PATH)


def get_theoretical_number():
    """Estimate the total number of blogs the site index claims to have.

    Reads the last page number from the home page's pagination
    ((last_page - 1) * 20), then counts the posts on the final page,
    re-reading until two consecutive counts agree.
    Returns the computed total (the trailing `return 0` is unreachable in
    practice because the inner loop only exits via `return`).
    """
    with sync_playwright() as playwright:  # playwright = sync_playwright() alone is not usable; the context manager is required
        browser = playwright.chromium.launch_persistent_context(user_data_dir=USER_DATA_DIR[0], executable_path=EXECUTABLE_PATH[0], channel="chrome", headless=False)
        page = browser.new_page()
        url = HOME_URL
        number = [0, 0]  # two consecutive post counts of the last page
        while 1:
            print(url)
            page.goto(url, wait_until="commit")
            while 1:
                html_data = page.content()
                save_html(html_data, "temp_dir/temp.html")
                soup1 = BeautifulSoup(html_data, features="lxml")
                if url == HOME_URL:
                    # First pass: read the pagination to find the last page's URL.
                    soup2 = soup1.find_all("a", attrs={"class":"page-numbers"})
                    if len(soup2)>0:
                        url = soup2[2]["href"]
                        theoretical_number = int(url.replace("https://www.nicezzz.com/page/", ""))
                        theoretical_number = (theoretical_number-1)*20  # 20 posts per full page
                        break
                else:
                    # Second pass: count posts on the last page until the count is stable.
                    soup2 = soup1.find("div", attrs={"class": "posts-row ajaxpager tab-pane fade in active"})
                    if soup2 is not None:
                        soup3 = soup2.find_all("div", attrs={"class":"item-thumbnail"})
                        if number[0]==0:
                            number[0] = len(soup3)
                        else:
                            time.sleep(10)
                            number[1] = len(soup3)
                        if number[1]!=0:
                            if number[0]!=number[1]:
                                # Counts disagree: keep the newer one and re-read.
                                time.sleep(10)
                                number=[number[1], 0]
                            else:
                                theoretical_number += len(soup3)
                                return theoretical_number
    return 0


def statistical_json_data():
    """Compute per-category blog counts plus totals, store them under the
    "统计" key of CLASS_PATH, and print the resulting statistics."""
    class_data = read_json(CLASS_PATH)
    blog_data = read_json(BLOG_PATH)
    stats = {}
    class_data["统计"] = stats
    # Start every known category at zero.
    for link in class_data["链接"]:
        stats[class_data["链接"][link]] = 0
    counted = 0
    prefix = "https://www.nicezzz.com/archives/"
    for url in blog_data:
        if prefix not in url:
            print("链接前缀异常:"+url)
        # Derive the category URL by stripping the archives prefix and the
        # trailing "/<id>.html" segment.
        tail = url.replace(prefix, "")
        category_key = "https://www.nicezzz.com/" + tail[:tail.rfind("/")]
        if category_key in class_data["链接"]:
            counted += 1
            stats[class_data["链接"][category_key]] += 1
        else:
            print("分类异常:"+url)
    stats["分类总数"] = counted
    stats["实际总数"] = len(blog_data)
    stats["理论总数"] = get_theoretical_number()
    save_json(class_data, CLASS_PATH)
    for key in stats:
        print("{} : {}".format(key, stats[key]))
    

def modify_blog_dir():
    """Normalize stored blog titles: replace "/" with "-" in every metadata JSON."""
    for filename in os.listdir(BLOG_DIR):
        json_path = BLOG_DIR + filename
        record = read_json(json_path)
        print(record["标题"])
        record["标题"] = record["标题"].replace("/", "-")
        save_json(record, json_path)


def modify_blog_json():
    """One-off maintenance: mark blogs as "link fetched" (status 1) when a
    file with the matching key name exists in the Thunder download directory."""
    blog_data = read_json(BLOG_PATH)
    download_dir = "E:/迅雷下载/"
    for filename in os.listdir(download_dir):
        # Reconstruct the blog URL from the file name's dash-separated key.
        blog_url = "https://www.nicezzz.com/archives/" + filename.replace("-", "/") + ".html"
        blog_url = blog_url.replace("korean/realgraphic", "korean-realgraphic")
        if blog_url in blog_data:
            print(filename)
            blog_data[blog_url] = 1
            save_json(blog_data, BLOG_PATH)


def repair_blog_json():
    """Repair a truncated BLOG_PATH file from the backup copy.

    A very large JSON write can be interrupted, leaving a truncated file that
    read_json cannot parse (yielding an empty dict). In that case the tail of
    the backup — starting at the last record the truncated file still
    contains — is stitched onto the truncated head. Otherwise the current
    file is copied over the backup.
    """
    path1 = "download_blog.json"
    data1 = read_json(path1)
    path2 = "back_up/download_blog.json"
    data2 = read_json(path2)
    n1 = len(data1)
    if n1==0 and n1!=len(data2):
        # Current file unreadable but the backup has data: repair.
        with open(path1, encoding="utf-8") as file:
            value1 = file.read()
        # Split the truncated file at the start of its last (possibly partial) record.
        i = value1.rfind("https://www.nicezzz.com/archives/")
        value11 = value1[:i]
        value12 = value1[i:]
        with open(path2, encoding="utf-8") as file:
            value2 = file.read()
        # Locate that same record in the backup and append everything from there on.
        j = value2.rfind(value12)
        if j>=0:
            value22 = value2[j:]
            value3 = value11 + value22
            # The with-block closes the file; the old explicit close() was redundant.
            with open(path1, 'w', encoding="utf-8") as file:
                file.write(value3)
    else:
        # Current file is intact: refresh the backup.
        save_json(data1, path2)


if __name__ == '__main__':
    # A very large JSON file can be interrupted mid-save; repair it first.
    repair_blog_json()

    # Download the site's category tree
    # download_nicezzz_class()

    # Create the matching directory layout
    download_dir = "E:/呦糖社/"
    download_dir = [download_dir, download_dir+"文件/", download_dir+"分类/", download_dir+"标签/"]
    for temp_dir in download_dir:
        create_dir(temp_dir)
    json_data1 = read_json(CLASS_PATH)
    for class_name1 in json_data1["分类"]:
        dir1 = download_dir[2] + class_name1 + "/"
        create_dir(dir1)
        for class_name2 in json_data1["分类"][class_name1]:
            dir2 = dir1 + class_name2 + "/"
            create_dir(dir2)
    create_dir(XUNLEI_DIR)
    create_dir(EXTRACT_DIR)

    # Crawl all blog URLs  2130*20+18=42540
    # dowload_nicezzz_blog(1, 1)
    
    # Download files. Status codes used in BLOG_PATH:
    # 2 file downloaded and extracted
    # 1.5 downloaded via browser
    # 1 download link fetched
    # -1 blocked: access to shrinkme.org denied
    # -10 file restricted to premium-account users
    # -100 security check failed: cannot reach Google's captcha servers
    # -120 page currently unusable
    # -130 link no longer valid
    # -140 rosefile.net wants to verify the connection first
    # -150 file deleted by its owner, temporarily unavailable
    # -160 Gateway time-out, Error code 504
    # -165 Bad gateway, Error code 502
    # -170 Mediafire : generating new download key
    # -175 Mediafire : Well, looks like we can't go any further
    # -176 Sorry, TeraBox is not available in current area
    # -180 site unreachable; check the https URL for typos, else run Windows network diagnostics. DNS_PROBE_FINISHED_NXDOMAIN
    # -200 archive extraction failed
    # -300 download link missing or already invalid
    # dowload_nicezzz_single_file("https://www.nicezzz.com/archives/cosplya/279504.html")
    # dowload_nicezzz_single_file("https://www.nicezzz.com/archives/cosplya/5542.html")

    debug_mode = 1
    if debug_mode==0:
        # Unattended mode: keep retrying until the disk fills up.
        while 1:
            try:
                result = dowload_nicezzz_file(download_dir)
                if result==1:
                    break
            except Exception as e:
                print(e)
                time.sleep(10)
    elif debug_mode==1:
        # Debug mode: run once and let exceptions propagate.
        dowload_nicezzz_file(download_dir)
    else:
        browser_dowload_nicezzz_file()

    # C:/GoogleChromePortable/App/Chrome-bin/Chrome.exe --remote-debugging-port=9666 --user-data-dir=C:/GoogleChromePortable/Data/profile/
        
    # "C:/Program Files (x86)/Microsoft/Edge/Application/msedge.exe" --remote-debugging-port=9666 --user-data-dir="C:/Users/wyh/AppData/Local/Microsoft/Edge/User Data/"


    # Gather statistics
    # statistical_json_data()

    # modify_blog_json()


    
    # shutil.unpack_archive("C:/Users/wyh/Downloads/[YALAYI雅拉伊]2023.08.14 NO.1060.rar", 'temp_dir', 'rar')

    # import zipfile
    # with zipfile.ZipFile("C:/Users/wyh/Downloads/[YALAYI雅拉伊]2023.08.14 NO.1060.rar", 'r') as zip_ref:
    #     zip_ref.extractall('temp_dir')

    # extract_nicezzz_file("C:/Users/wyh/Downloads/[YALAYI雅拉伊]2023.08.14 NO.1060.rar", "temp_dir")

    # extract_nicezzz_file("temp_dir/星之迟迟+VOL.14.rar", download_dir, "vx666878787")
    pass


# D:/Thunder/Program/Thunder.exe https://download1587.mediafire.com/qek8xhq3sxag4TUCW2EMPljMmoyZYmI8kd--cjbeDT5cNreWB8d9u0ELlyP3z7e9RSdtWyqFM8wSuUIjUugfdmiDAShIipEmJTFFWTheLucB30XVwja2djKpDTQevqRBcYlNVCeY1jN2hX29VLea9_hW8HCxp9zawrj-4rnHTySxIHI/ptlsial67atns59/%E6%98%9F%E4%B9%8B%E8%BF%9F%E8%BF%9F+VOL.14.rar


# https://blog.csdn.net/lilongsy/article/details/130560129
# https://www.iotword.com/5680.html