from fileinput import filename
import os
from turtle import title
import requests
from bs4 import BeautifulSoup
import tkinter as tk
from tkinter import filedialog, messagebox, ttk
import threading
import re
from datetime import datetime
import json
import webbrowser

# Load the command arguments (download URL, save directory, file name,
# display title, base download directory) from the JSON config file.
# BUG FIX: the original called json.load(f) once per key; after the first
# call the file pointer sits at EOF, so every later call raised
# json.JSONDecodeError. Parse the document exactly once instead.
with open('COMMANDARGS.json', 'r', encoding='utf-8') as f:
    _args = json.load(f)
url = _args["du"]
directory = _args["d"]
filename = _args["fn"]
title = _args["tt"]
download_directory = _args["dd"]

# Download the requested file immediately at startup and report the result
# in a Tk message box.
response = requests.get(url)
if response.status_code == 200:
    # BUG FIX: the original saved to the literal name "(unknown).pdf" and
    # never used the configured filename.
    file_path = os.path.join(directory, f"{filename}.pdf")
    with open(file_path, 'wb') as file:
        file.write(response.content)
    messagebox.showinfo("下载完成", f"{title}.pdf 下载完成")
else:
    messagebox.showerror("下载失败", f"{title}.pdf 下载失败")

def download_smartedu_content(url):
    """Download the PDF behind a smartedu resource URL.

    Extracts the ``contentId`` query parameter from *url*, fetches the
    resource's metadata JSON to pull out its title, creates a
    title-plus-timestamp sub-directory under the global
    ``download_directory``, and delegates the PDF download to
    ``download_file``. Shows a warning dialog (code "30000") when no title
    can be found in the metadata; silently returns when the URL carries no
    ``contentId``.
    """
    content_id_match = re.search(r'contentId=([^&]+)', url)
    if not content_id_match:
        # No contentId in the URL — nothing to download (same silent
        # fall-through as before, made explicit).
        return
    content_id = content_id_match.group(1)
    title_url = f"https://s-file-2.ykt.cbern.com.cn/zxx/ndrv2/resources/tch_material/details/{content_id}.json"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0'
    }
    response = requests.get(url=title_url, headers=headers)
    # NOTE: the title is pulled out of the raw JSON text with a regex; the
    # exact document schema is unknown here, so the pattern is kept as-is.
    title_match = re.search(r'"title"\s*:\s*"([^"]+)"', response.text)
    if not title_match:
        messagebox.showwarning("Warning", "30000")
        return
    # FIX: the old `title = None` branch was unreachable — group(1) matches
    # [^"]+ and is therefore never empty before stripping.
    title = title_match.group(1).strip()
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    sub_directory = title + " " + timestamp if title else timestamp
    # Replace characters that are unsafe in directory names.
    valid_sub_directory = "".join(c if c.isalnum() or c.isspace() else "_" for c in sub_directory)
    full_path = os.path.join(download_directory, valid_sub_directory)
    os.makedirs(full_path, exist_ok=True)
    download_url = f"https://r2-ndr.ykt.cbern.com.cn/edu_product/esp/assets/{content_id}.pkg/pdf.pdf"
    download_file(download_url, full_path, valid_sub_directory, title)
    
def download_pep_content(url):
    """Download every page image of a PEP e-book given its viewer URL.

    Scrapes the page title to name a sub-directory under the global
    ``download_directory``, extracts the numeric book id from *url*, reads
    the total page count from the book's config.js, then fetches all page
    JPEGs on a background thread so the Tk UI stays responsive. Warning
    dialogs use codes 40000-40003 for the different failure points.
    """
    response = requests.get(url)
    response.encoding = response.apparent_encoding
    soup = BeautifulSoup(response.text, 'lxml')

    # Derive a directory name from the page title.
    # BUG FIX: soup.title.string is None when <title> contains nested
    # markup; the original crashed with AttributeError on .strip() there.
    title_tag = soup.title
    if title_tag is not None and title_tag.string is not None:
        title = title_tag.string.strip()
    else:
        title = "Untitled"

    # Replace characters that are unsafe in directory names.
    valid_title = "".join(c if c.isalnum() or c.isspace() else "_" for c in title)
    full_path = os.path.join(download_directory, valid_title)
    os.makedirs(full_path, exist_ok=True)

    # Extract the numeric book id from the viewer URL (guard clauses
    # replace the original pyramid of nested if/else).
    id_match = re.search(r"/(\d+)/mobile/index\.html", url)
    if not id_match:
        messagebox.showwarning("Warning", "40003")
        return
    book_id = id_match.group(1)

    # Read the total page count from the book's JS config.
    js_url = "https://book.pep.com.cn/" + book_id + "/mobile/javascript/config.js"
    js_response = requests.get(js_url)
    if js_response.status_code != 200:
        messagebox.showwarning("Warning", "40002")
        return
    count_match = re.search(r"bookConfig\.totalPageCount\s*=\s*(\d+);", js_response.text)
    if not count_match:
        messagebox.showwarning("Warning", "40001")
        return
    page_count = int(count_match.group(1))  # convert once, not per use
    if page_count <= 0:
        messagebox.showwarning("Warning", "40000")
        return

    def download_pages():
        # Fetch each page image sequentially; non-200 pages are skipped.
        for page_number in range(1, page_count + 1):
            page_url = f"https://book.pep.com.cn/{book_id}/files/mobile/{page_number}.jpg"
            page_response = requests.get(page_url)
            if page_response.status_code == 200:
                file_path = os.path.join(full_path, f"{page_number}.jpg")
                with open(file_path, 'wb') as file:
                    file.write(page_response.content)

    # Run the bulk download off the main (Tk) thread.
    threading.Thread(target=download_pages).start()

def download_file(url, directory, filename, title):
    """Download *url* and save it as ``<filename>.pdf`` inside *directory*.

    Reports success or failure to the user via a Tk message box labelled
    with *title*. No exception handling — network errors from
    requests.get propagate to the caller.
    """
    response = requests.get(url)
    if response.status_code == 200:
        # BUG FIX: the original wrote the literal name "(unknown).pdf" and
        # ignored the `filename` argument entirely.
        file_path = os.path.join(directory, f"{filename}.pdf")
        with open(file_path, 'wb') as file:
            file.write(response.content)
        messagebox.showinfo("下载完成", f"{title}.pdf 下载完成")
    else:
        messagebox.showerror("下载失败", f"{title}.pdf 下载失败")