import random

from bs4 import BeautifulSoup
import requests
from urllib.parse import unquote
import download_img
from urllib.parse import urlencode
import time
import json
import random
import proxy_list

# Root URL of the site being mirrored.
url = "https://learn.lianglianglee.com/"
# Browser-like request headers captured from a real Chrome session.
# The cookie carries a Cloudflare clearance token (cf_clearance), so
# requests without these headers are likely to be challenged.
headers = {
    "authority": "learn.lianglianglee.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "no-cache",
    # NOTE(review): the encode/decode round-trip forces the cookie into
    # latin-1-safe form for the HTTP layer — confirm it is still needed.
    "cookie": "_ga=GA1.1.1126974100.1695797603; lastPath=/专栏; cf_clearance=2PQsrdYd7fFjGoapthqhJugjKKA8VF5ZMKzIN7Jo64o-1699168141-0-1-5d141f74.f785cc84.a154094-0.2.1699168141; _ga_NPSEEVD756=GS1.1.1699167196.8.1.1699168142.56.0.0".encode("utf-8").decode("latin1"),
    "pragma": "no-cache",
    "referer": "https://learn.lianglianglee.com/",
    # NOTE(review): the doubled backslashes send literal \" sequences in the
    # header value — looks like a copy/paste escaping artifact; verify the
    # server accepts it before changing.
    "sec-ch-ua": "\\\"Google Chrome\\\";v=\\\"117\\\", \\\"Not;A=Brand\\\";v=\\\"8\\\", \\\"Chromium\\\";v=\\\"117\\\"",
                                                                                 "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"macOS\"",
                                 "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
    "Accept-Encoding": "deflate, gzip"
}
# Position in the rotating proxy pool; advanced by changeProxy() when a
# request fails.
proxy_index = 0
proxy = proxy_list.proxies[proxy_index]

def _dfs_doc(url, firstDir):
    """Fetch the top-level directory page, rewrite its ``.md`` menu links
    to ``.html``, save the page locally, and return the menu anchors.

    :param url: site root URL (with trailing slash).
    :param firstDir: first-level directory path appended to the root.
    :return: list of ``<a class="menu-item">`` tags found on the page.
    """
    response = requests.request("GET", url + firstDir, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')
    # _adjust_page_link already selects ".menu-item" and returns the
    # result — reuse it instead of querying the soup a second time.
    select = _adjust_page_link(soup)
    _save_by_url(firstDir + ".html", soup.prettify())
    # Debug output of the first menu entry; guarded so an empty menu no
    # longer raises IndexError.
    if select:
        print(select[0])
    return select


def _sub_doc(url, path):
    """Download one sub-directory page, convert its menu links from
    ``.md`` to ``.html``, persist the page, and return the menu anchors.

    :param url: site root URL (with trailing slash).
    :param path: directory path of the sub-module, relative to the root.
    :return: list of ``<a class="menu-item">`` tags on the page.
    """
    page = requests.request("GET", url + path, headers=headers)
    doc = BeautifulSoup(page.text, 'lxml')
    menu_items = _adjust_page_link(doc)
    _save_by_url(path + ".html", doc.prettify())
    return menu_items


def _save_by_url(path, content):
    """Write *content* to *path*, creating parent directories first.

    :param path: relative file path to write to.
    :param content: text to persist.
    """
    download_img.makedir(path)
    # The pages contain Chinese text; pin UTF-8 rather than relying on
    # the platform default encoding (which breaks on e.g. Windows/GBK).
    # The ``with`` block closes the file — the old explicit close() was
    # redundant.
    with open(path, "w", encoding="utf-8") as f:
        f.write(content)

def _adjust_page_link(soup):
    """Rewrite every ``.menu-item`` anchor's href from ``.md`` to
    ``.html`` (in place) and return the list of those anchors.

    :param soup: parsed BeautifulSoup document to mutate.
    :return: list of the ``.menu-item`` tags that were adjusted.
    """
    items = soup.select(".menu-item")
    for item in items:
        href = item.attrs['href']
        item.attrs['href'] = href.replace(".md", ".html")
    return items
# First-level directory to mirror; the site groups courses under "专栏".
firstDir = "专栏"
# Anchors for every course module listed on the index page.
listItem = _dfs_doc(url, firstDir)
# NOTE(review): never read anywhere in this file — looks dead; confirm
# before removing.
itemIndex = 0
def changeProxy():
    """Rotate to the next proxy in ``proxy_list.proxies``.

    Bug fix: the previous implementation was a complete no-op —
    ``proxy_index.__add__(1)`` computed a new int and discarded it, and
    ``proxy = ...`` bound a function-local name, leaving the module-level
    ``proxy`` untouched.  Declaring both names ``global`` makes the
    rotation actually take effect; the modulo wraps around instead of
    running off the end of the pool.
    """
    global proxy_index, proxy
    proxy_index = (proxy_index + 1) % len(proxy_list.proxies)
    proxy = proxy_list.proxies[proxy_index]

# Mirror every lesson page of the selected course module: fetch each page
# through the current proxy (rotating on failure), download the images it
# references, rewrite menu links for offline use, and save it as .html.
for subModule in listItem:
    # Only mirror the one course we care about.
    if "Spring编程常见错误50例" not in subModule.get("id"):
        continue
    listItemSub = _sub_doc(url, firstDir + "/" + subModule.get('id'))
    index = 0
    for subItem in listItemSub:
        index = index + 1
        if index < 5:
            # NOTE(review): hard-coded resume point skipping the first few
            # pages (presumably already fetched) — confirm before re-running.
            continue
        while True:
            # Small randomized delay between requests to stay polite.
            time.sleep(2 + random.randrange(start=20, stop=100, step=9) / 100.0)
            fileName = unquote(subItem.get('id'), 'utf-8')
            fullUrl = url + firstDir + "/" + subModule.get('id') + "/" + fileName
            try:
                responseLeaf = requests.request(
                    "GET", fullUrl, headers=headers, proxies=proxy, timeout=2)
            except requests.RequestException as exc:
                # A timeout/connection error used to escape the retry loop
                # and kill the whole crawl; treat it like a bad proxy.
                print("请求异常， 切换代理", exc)
                changeProxy()
                continue
            if responseLeaf.status_code != 200:
                print("ip 不可靠， 切换代理")
                _save_by_url('bugs', responseLeaf.text)
                changeProxy()
                continue
            break
        soup = BeautifulSoup(responseLeaf.text, 'lxml')
        for img in soup.select("img"):
            pathOfUrl = unquote(img.get('src'), 'utf-8')
            # Absolute src ("/a/b.png"): strip the leading slash for the
            # local save path.  Relative src: prefix the module directory.
            # (startswith replaces index("/") == 0, which raised ValueError
            # when the src contained no slash at all.)
            if pathOfUrl.startswith("/"):
                pathOfSave = pathOfUrl[1:]
                pathOfUrl = pathOfSave
            else:
                pathOfUrl = firstDir + "/" + subModule.get('id') + "/" + pathOfUrl
                pathOfSave = pathOfUrl

            download_img.download_image(url + pathOfUrl, pathOfSave)
        _adjust_page_link(soup)
        _save_by_url(firstDir + "/" + subModule.get('id') + "/"
                     + fileName.replace(".md", ".html"), soup.prettify())
        print(fileName)


# for a in soup.select(".menu-item"):
#     href = a.get('href')
#     print(href)

