import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from crawler_log import CrawlerError
from crawler_log import LogType
from crawler_log import log
from crawler_log import save

def crawler(headless: bool = True):
    """Crawl Apple developer tutorial catalogs and save each sub-page's text.

    For every catalog URL, collects the sub-page links via getCatalog(),
    visits each sub-page, and extracts/persists its content via getContent().

    Args:
        headless: run Chrome without a visible window when True (default),
            matching the original script's hard-coded flag behavior.
    """
    apple_catalog_urls = [
        'https://developer.apple.com/tutorials/SwiftUI',
        'https://developer.apple.com/tutorials/swiftui-concepts',
        'https://developer.apple.com/tutorials/Sample-Apps',
    ]
    if headless:
        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        options.add_argument("--disable-gpu")
        # Selenium 4 removed the `chrome_options` keyword; use `options=`.
        driver = webdriver.Chrome(options=options)
    else:
        driver = webdriver.Chrome()

    try:
        for catalog_url in apple_catalog_urls:
            driver.get(catalog_url)
            time.sleep(5)  # crude wait for the JS-rendered catalog to load
            sub_urls = getCatalog(driver)
            if not sub_urls:
                log(CrawlerError.catalogGetFail.value, LogType.error)
                print('error:' + catalog_url)
                # BUG FIX: the original called driver.quit() here before
                # `continue`, so every later iteration used a dead session.
                # Keep the browser alive and just skip this catalog.
                continue
            for sub_url_str in sub_urls:
                driver.get(sub_url_str)
                print(driver.current_url)
                time.sleep(5)  # wait for the page to render before scraping
                getContent(driver)
            time.sleep(1)
    finally:
        # Always release the browser, even if a page raises mid-crawl.
        driver.quit()

def getContent(driver: webdriver):
    """Scrape the current page's paragraph and heading text and persist it.

    Gathers the text of every <p>, <h1> and <h2> element on the loaded page,
    maps each non-empty text to a placeholder translation value, and saves
    the mapping under a path/name derived from the page URL's last two
    segments.
    """
    current = driver.current_url
    elements = []
    for tag in ('p', 'h1', 'h2'):
        elements += driver.find_elements(By.TAG_NAME, tag)
    texts = [el.text for el in elements]
    # Empty strings are dropped; duplicate texts collapse into one key.
    content_dic = {t: "汉语翻译" for t in texts if len(t) != 0}
    parts = current.split('/')
    save(content_dic, parts[-2], parts[-1])

def getCatalog(driver: webdriver) -> list:
    """Return the href of every tutorial link on the loaded catalog page.

    Locates the <a> elements inside the first section's ordered list and
    extracts their absolute URLs.

    Args:
        driver: a WebDriver with the catalog page already loaded.

    Returns:
        A list of href strings; empty if the XPath matches nothing.

    Note: the return annotation was `-> []` (a list *literal*, not a type);
    fixed to `-> list`.
    """
    # Anchored on the #main container rather than an absolute /html/body/...
    # path, which is more resilient to layout wrapper changes.
    xpath = '//*[@id="main"]/div[2]/div/div[2]/div/section[1]/section/ol/li/a'
    links = driver.find_elements(By.XPATH, xpath)
    return [a.get_attribute('href') for a in links]

