import os
import time
import shutil
import subprocess
import json
import winshell
import requests
import wget
from bs4 import BeautifulSoup
from win32com.client import Dispatch
from playwright.sync_api import sync_playwright

# Desktop Chrome/Edge user-agent header used for scraping HTTP requests.
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"}
# Bytes per MiB (1024 * 1024); not referenced in this chunk — presumably used elsewhere in the file.
UNIT = 1024.0 * 1024.0
# Silence urllib3 warnings globally (typically the InsecureRequestWarning emitted
# when requests are made with verify=False).
requests.packages.urllib3.disable_warnings()


def read_json(path):
    """Read a UTF-8 JSON file and return its contents.

    Args:
        path: Path of the JSON file to read.

    Returns:
        The parsed JSON value, or an empty dict if the file does not
        contain valid JSON.

    Raises:
        OSError: if the file cannot be opened (same as the original).
    """
    data = {}
    with open(path, "r", encoding="utf-8") as file:
        try:
            data = json.load(file)
        # Only swallow parse errors; a bare except also hid OSError,
        # KeyboardInterrupt, etc.
        except json.JSONDecodeError:
            pass
    return data


def save_json(save_value, save_path):
    """Serialize *save_value* to *save_path* as indented UTF-8 JSON.

    Non-ASCII characters are written verbatim (ensure_ascii=False).
    """
    serialized = json.dumps(save_value, indent=4, ensure_ascii=False)
    with open(save_path, "w", encoding="utf-8") as handle:
        handle.write(serialized)


def save_html(save_value, save_path, encoding="utf-8"):
    """Write *save_value* plus a trailing newline to *save_path*.

    Args:
        save_value: Text to write (typically an HTML document).
        save_path: Destination file path; overwritten if it exists.
        encoding: Text encoding for the output file.
    """
    # The explicit file.close() inside the with-block was redundant:
    # the context manager already closes the file.
    with open(save_path, 'w', encoding=encoding) as file:
        file.write(save_value + "\n")


def create_dir(file_dir):
    """Create *file_dir* if it does not already exist.

    Generalized from os.mkdir: intermediate directories are created as
    needed, and the exists-check/create race is eliminated via
    exist_ok=True (the old check-then-mkdir could raise FileExistsError
    under concurrency).
    """
    os.makedirs(file_dir, exist_ok=True)


def create_shortcut(original_path, lnk_path):
    """Create (or replace) a Windows .lnk shortcut pointing at *original_path*.

    Note: winshell needs backslash-separated paths, not forward slashes.
    """
    target = original_path.replace("/", "\\")
    shortcut_file = lnk_path.replace("/", "\\")
    # Replace any existing shortcut rather than failing on it.
    if os.path.exists(shortcut_file):
        os.remove(shortcut_file)
    with winshell.shortcut(shortcut_file) as link:
        link.path = target


def extract_file(file_path, extract_dir, password, show_info=0):
    """Extract a password-protected archive with 7-Zip and tidy the result.

    Args:
        file_path: Archive to extract.
        extract_dir: Destination directory for extraction.
        password: Archive password passed to 7z via -p.
        show_info: 0 suppresses 7z stdout (-bso0); any other value shows it.

    Returns:
        1 on an accepted layout (one entry, or two entries where one is the
        known advertisement text file), -1 otherwise.

    NOTE(review): 7z path D:/7-Zip/7z.exe is hard-coded; the command is built
    by string formatting, so paths/passwords containing quotes would break
    the shell command — acceptable for trusted local input only.
    """
    if show_info==0:
        # -bso0 silences 7z's standard output stream.
        os.system('D:/7-Zip/7z.exe x "{}" -o"{}" -p"{}" -bso0'.format(file_path, extract_dir, password))
    else:
        os.system('D:/7-Zip/7z.exe x "{}" -o"{}" -p"{}"'.format(file_path, extract_dir, password))
    file_names =  os.listdir(extract_dir)
    if len(file_names)==1:
        # Single extracted entry: treat as success (the len(names)>0 check is
        # always true since extract_dir+"/" is non-empty).
        names = extract_dir+"/"+file_names[0]
        if len(names)>0:
            return 1
    elif len(file_names)==2:
        # Two entries: locate the known advertisement/readme text file
        # ("女神研究所网址-说明文件 sssins.com.txt") among them.
        n = []
        if file_names[0]=="女神研究所网址-说明文件 sssins.com.txt":
            n=[0, 1]
        elif  file_names[1]=="女神研究所网址-说明文件 sssins.com.txt":
            n=[1, 0]
        if len(n)==2:
            path1 = extract_dir+"/"+file_names[n[0]]
            path2 = extract_dir+"/"+file_names[n[1]]
            # Moves the ad text file to/into the other entry — presumably the
            # other entry is a directory, so the txt ends up inside it.
            # TODO(review): confirm; if path2 is a file this overwrites it.
            shutil.move(path1, path2)
            return 1
    return -1


def check_chrome_statue():
    """Return True if a chrome.exe process is running (Windows only).

    Shells out to the Windows ``tasklist`` command. Name kept (including
    the "statue" typo) for compatibility with existing callers.
    """
    output = subprocess.check_output('tasklist', shell=True)
    # Decode the bytes instead of relying on str(bytes), which searched
    # inside the b'...' repr string.
    return 'chrome.exe' in output.decode(errors="ignore")


def wget_download(dowload_link, download_path):
    """Download *dowload_link* to *download_path* via the wget package."""
    wget.download(dowload_link, download_path)


def xunlei_download(dowload_link, key):
    """Queue *dowload_link* in the Thunder (Xunlei) download manager.

    Args:
        dowload_link: URL to hand to Thunder.
        key: Target file name for the task.

    Raises:
        Exception: if *key* contains ".htm" (indicates a page name was
            mistakenly used as a download name).
    """
    # Validate before creating the COM object so bad keys fail fast,
    # even when Thunder is not installed; the original raised only after
    # Dispatch() had already succeeded.
    if ".htm" in key:
        raise Exception("下载名称错误："+key)
    thunder = Dispatch('ThunderAgent.Agent64.1')
    thunder.AddTask(dowload_link, key)
    thunder.CommitTasks()


def browser_download(dowload_link, download_path):
    """Download a file via a CDP-attached Chrome session driven by Playwright.

    Kills any running Chrome first (so the debug-port instance owns the
    profile), or launches a portable Chrome with remote debugging enabled,
    then attaches over CDP, navigates to *dowload_link*, waits for manual
    interaction (page.pause opens the Playwright inspector), and saves the
    file triggered by the "Download file" link to *download_path*.
    """
    
    with sync_playwright() as playwright:  # plain playwright = sync_playwright() does not work; the context manager is required
        if check_chrome_statue():
            # Chrome already running without the debug port — kill it so we
            # can attach on 9666 next run. NOTE(review): this branch does not
            # relaunch Chrome, so connect_over_cdp below presumably fails
            # unless a debug-port instance survives — confirm intended flow.
            os.system("taskkill /f /im chrome.exe")
        else:
            # Launch portable Chrome with the CDP debug port open.
            command = r"C:/GoogleChromePortable/App/Chrome-bin/Chrome.exe --remote-debugging-port=9666 --user-data-dir=C:/GoogleChromePortable/Data/profile/"
            subprocess.Popen(command)
            time.sleep(5)  # give Chrome time to open the debug port
        browser = playwright.chromium.connect_over_cdp("http://127.0.0.1:9666")
        context = browser.contexts[0]
        page = context.pages[0]
        # wait_until="commit" returns as soon as navigation commits, before
        # the page finishes loading.
        page.goto(dowload_link, wait_until="commit")
        page.pause()
        with page.expect_download() as download_info:
            page.get_by_role("link", name="Download file").click()
            download = download_info.value
        download.save_as(download_path)
        # NOTE(review): leftover experimental scraping loop kept for reference.
        # while 1:
        #     html_data = page.content()
        #     save_html(html_data, "temp_dir/temp.html")
        #     # soup1 = BeautifulSoup(html_data, features="lxml")
        #     # soup2 = soup1.find("h1", attrs={"class":"article-title"})
        #     try:
                
        #         break
        #     except:
        #         page.pause()


def get_url_key(url):
    """Derive a filesystem-friendly key from a nicezzz.com archive URL.

    Strips the site prefix and ".html" suffix, then turns remaining path
    separators into dashes.
    """
    key = url
    for old, new in (
        ("https://www.nicezzz.com/archives/", ""),
        (".html", ""),
        ("/", "-"),
    ):
        key = key.replace(old, new)
    return key


def find_blog_info(key):
    """Scan blog_dir/ JSON records and print every entry whose title or
    link contains *key*."""
    blog_dir = "blog_dir/"
    for file_name in os.listdir(blog_dir):
        record_path = blog_dir + file_name
        record = read_json(record_path)
        if key not in record["标题"] and key not in record["链接"]:
            continue
        print(record_path)
        print(record["链接"])
        print(record["标题"])
        url_key = get_url_key(record["链接"])
        label = "[{}] {}\n".format(url_key, record["标题"])
        print(label)

def get_folder_size(folder_path):
    """Return the combined size in bytes of the regular files directly
    inside *folder_path* (non-recursive: subdirectories are skipped).

    Uses os.scandir so each entry is stat'ed once, instead of the
    listdir + isfile + getsize combination that stat'ed twice per file.
    (The duplicate mid-file ``import os`` was removed; os is imported at
    the top of the module.)
    """
    total = 0
    for entry in os.scandir(folder_path):
        if entry.is_file():
            total += entry.stat().st_size
    return total

def check_download_blog():
    """Compare each downloaded folder's on-disk size with the size encoded
    in its name, printing any that differ by more than 3%.

    Folder-name convention (as parsed here): the expected size follows the
    last "-", the final character is dropped, and a trailing "G"/"G?" means
    GB (x1024 to MB) while "M"/"M?" means MB.

    Bug fixed: the diagnostic prints referenced ``size2`` before it was
    assigned (NameError on the first iteration, stale value afterwards);
    the actual size is now computed up front. Also guards ``size1[-2]``
    against one-character strings, which previously raised IndexError.
    """
    dir1 = "G:/呦糖社/文件/"
    for name in os.listdir(dir1):
        path1 = dir1 + name
        # Actual size in MB, computed before any diagnostic print needs it.
        size2 = str(float(get_folder_size(path1) / 1024 / 1024))
        # Expected size: text after the last "-", minus the final character.
        size1 = name[name.rfind("-") + 1:][:-1]
        if " " in size1:
            size1 = size1[size1.rfind(" ") + 1:]
        if size1 == "":
            print("----------------------------3 {}".format(path1))
            continue
        if size1[-1] == "G":
            size1 = str(float(size1[:-1]) * 1024)
        elif len(size1) >= 2 and size1[-2] == "G":
            size1 = str(float(size1[:-2]) * 1024)
        elif size1[-1] == "M":
            size1 = size1[:-1]
            if size1 == "":
                print("----------------------------1 {} {}MB".format(path1, size2))
                continue
            size1 = str(float(size1))
        elif len(size1) >= 2 and size1[-2] == "M":
            size1 = size1[:-2]
            if size1 == "":
                print("----------------------------1 {} {}MB".format(path1, size2))
                continue
            size1 = str(float(size1))
        else:
            print("----------------------------2 {} {}MB".format(path1, size2))
            continue
        expected_mb = float(size1)
        actual_mb = float(size2)
        rate = 100 * abs(expected_mb - actual_mb) / expected_mb
        if rate > 3:
            print("{} {}MB {}MB".format(path1, size1, size2))



if __name__ == '__main__':
    # Other entry points, kept as usage examples:
    #   browser_download(mediafire_direct_link, "E:/浏览器下载/siz-rosikzxl-169463")
    #   find_blog_info("Night Roads")
    check_download_blog()