import os
import time

import requests
from bs4 import BeautifulSoup
from lxml import etree
from selenium import webdriver
# Options enables running Chrome without a visible window (headless)
from selenium.webdriver import ChromeOptions
from selenium.webdriver.chrome.options import Options
# By is used for element lookups when driving the browser
from selenium.webdriver.common.by import By

# Category-page URLs collected by readFairyGuiCategory(); consumed in __main__.
categoryLinkArray=[]
# Documentation-page URLs accumulated by readFairyGuiPic() across calls.
allUrl=[]
# Root of the site being scraped; joined with relative hrefs found in pages.
baseUrl="https://fairygui.com"


def TestBS4():
    """Fetch the Baidu homepage and print the first <a> tag under div.s_tab>div.

    Demonstration of BeautifulSoup CSS-selector navigation; output goes to
    stdout only.
    """
    request_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36"
    }
    response = requests.get(url="https://www.baidu.com/", headers=request_headers)
    # Decode the raw bytes explicitly so the parser sees proper UTF-8 text.
    page_text = response.content.decode("utf-8")
    document = BeautifulSoup(page_text, 'lxml')
    # select() returns all matches; take the first container, then its <a> child.
    first_tab = document.select("div.s_tab>div")[0]
    print(first_tab.a)

def TestXPathFunc():
    """Fetch the Baidu homepage and print div ids found via an XPath query.

    Demonstration of lxml's etree.HTML + xpath navigation; output goes to
    stdout only.
    """
    url = "https://www.baidu.com/"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36"
    }
    resp = requests.get(url=url, headers=headers)
    # Decode explicitly so lxml parses proper UTF-8 text.
    html_doc = resp.content.decode("utf-8")
    tree = etree.HTML(html_doc)
    # Removed the unused (and misspelled) local `titile = tree.xpath(...)`.
    # @id at the end of the path extracts the id attribute of each matched div.
    print(tree.xpath('//div[@class="wrapper_new"]/div[@id="head"]/div/@id'))


def TestSeleniumFunc():
    """Open the FairyGUI editor docs in headless Chrome and print the page HTML.

    Demonstration of driving a page with Selenium; output goes to stdout only.
    Fix: the driver is now always quit, so the headless Chrome process no
    longer leaks when the function returns or raises.
    """
    option = webdriver.ChromeOptions()
    # Run without a visible browser window.
    option.add_argument('--headless')
    option.add_argument('--disable-gpu')

    # Reduce the chance of the site detecting Selenium automation.
    option.add_experimental_option('useAutomationExtension', False)
    option.add_experimental_option('excludeSwitches', ['enable-automation'])
    option.add_experimental_option("detach", True)

    driver = webdriver.Chrome(options=option)  # launch the browser
    try:
        driver.get("https://www.fairygui.com/docs/editor")  # navigate to the page
        html_doc = driver.page_source
        # driver.switch_to.frame("frameid")  # needed only if content lives in an iframe
        print(html_doc)
    finally:
        # Always release the browser process (original leaked it).
        driver.quit()









def readFairyGuiCategory():
    """Collect the FairyGUI docs category links into the global categoryLinkArray.

    Loads baseUrl + "/docs/editor" in headless Chrome, extracts every
    ``.category > a`` href, prefixes it with baseUrl, and appends the full
    URLs to the module-level ``categoryLinkArray``.

    Fix: the Chrome driver is now quit in a finally block so the headless
    browser process no longer leaks.
    """
    option = webdriver.ChromeOptions()
    # Run without a visible browser window.
    option.add_argument('--headless')
    option.add_argument('--disable-gpu')

    # Reduce the chance of the site detecting Selenium automation.
    option.add_experimental_option('useAutomationExtension', False)
    option.add_experimental_option('excludeSwitches', ['enable-automation'])
    option.add_experimental_option("detach", True)

    driver = webdriver.Chrome(options=option)  # launch the browser
    try:
        driver.get(baseUrl + "/docs/editor")
        html_doc = driver.page_source
    finally:
        driver.quit()  # always release the browser process (original leaked it)

    soup = BeautifulSoup(html_doc, 'lxml')
    # Every category entry link, made absolute with the site root.
    categoryLinkArray.extend(
        baseUrl + link["href"] for link in soup.select(".category>a")
    )



def readFairyGuiPic(url):
    """Download every <p><img> image from all pages linked off a category page.

    Opens ``url`` in headless Chrome, collects the ``div.page-link > a``
    links on it, visits each one, and saves every ``p > img`` image into
    ``./docs_images/``.

    Fixes:
    - Page links are iterated from a *local* list. The original appended to
      the global ``allUrl`` and then looped over the whole global, so every
      call after the first re-downloaded all previously collected pages.
      ``allUrl`` is still extended to preserve the module-level record.
    - The driver is quit in a finally block (original leaked the browser).
    - Image filename is taken with ``split('/')[-1]`` (basename); the
      original ``[2]`` picked a directory name for src paths deeper than
      two segments.
    - The output directory is created if missing, so open() cannot fail on
      a fresh checkout.
    """
    option = webdriver.ChromeOptions()
    # Run without a visible browser window.
    option.add_argument('--headless')
    option.add_argument('--disable-gpu')

    # Reduce the chance of the site detecting Selenium automation.
    option.add_experimental_option('useAutomationExtension', False)
    option.add_experimental_option('excludeSwitches', ['enable-automation'])
    option.add_experimental_option("detach", True)

    driver = webdriver.Chrome(options=option)  # launch the browser
    try:
        driver.get(url)
        soup = BeautifulSoup(driver.page_source, 'lxml')

        # Absolute URLs of this category's pages only.
        page_urls = [
            "https://www.fairygui.com" + link["href"]
            for link in soup.select("div.page-link>a")
        ]
        allUrl.extend(page_urls)  # keep the global record callers may inspect

        os.makedirs('./docs_images', exist_ok=True)
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36"
        }

        for page_url in page_urls:
            time.sleep(2)  # be polite to the server between page loads
            driver.get(page_url)
            page_soup = BeautifulSoup(driver.page_source, 'lxml')
            for img in page_soup.select("p>img"):
                src = img["src"]
                img_name = src.split('/')[-1]  # basename of the image path
                resp = requests.get("https://www.fairygui.com" + src, headers=headers)
                # 'wb': binary write, file is created if it does not exist.
                with open('./docs_images/' + img_name, 'wb') as f:
                    f.write(resp.content)
    finally:
        driver.quit()  # always release the browser process



if __name__ == '__main__':
    # Exploratory demos, left disabled:
    # TestBS4()
    # TestXPathFunc()
    # TestSeleniumFunc()


    # Gather all category URLs into categoryLinkArray, then scrape the
    # images from every page linked off each category.
    readFairyGuiCategory()
    for url in categoryLinkArray:
        readFairyGuiPic(url)