from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
import json, os, time
import requests
import pandas as pd

# Endpoint that lists all performance tasks on the target site.
baseUrl = "https://www.performanceassessmentresourcebank.org/bin/performance-tasks"
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a local config file before sharing this script.
gaccount = "honkor@qq.com"
gpassword = "Xiaoyan1"
# Expected total number of tasks on the site; URL scraping stops at this count.
gTaskLimit = 549

# Path to the local chromedriver binary (Windows layout).
gchromePath = "D:\\Chrome\\chromedriver.exe"
# JSON cache mapping task title -> list of detail-page URLs.
gfj = "./taskUrls.json"

def getDriver(chromePath = "D:\\Chrome\\chromedriver.exe"):
    """Launch a Chrome WebDriver that ignores certificate/SSL errors."""
    opts = webdriver.ChromeOptions()
    for flag in ('--ignore-certificate-errors', '--ignore-ssl-errors'):
        opts.add_argument(flag)
    return webdriver.Chrome(options=opts, service=Service(chromePath))

def findByXpath(driver, xpath):
    """Return every element in *driver* matching *xpath* (possibly empty)."""
    matches = driver.find_elements(By.XPATH, xpath)
    return matches

def findById(driver, id):
    """Return the single element whose HTML id attribute equals *id*."""
    element = driver.find_element(By.ID, id)
    return element

def moveClick(driver, node, sleep=1):
    """Click *node* robustly, escalating through JavaScript fallbacks.

    Order of attempts: native click → JS click → scroll into view, then
    native click → JS click. Sleeps *sleep* seconds afterwards when truthy.

    Fix over the original: the bare ``except:`` clauses are narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit are not swallowed.
    """
    try:
        node.click()
    except Exception:
        try:
            driver.execute_script("arguments[0].click();", node)
        except Exception:
            # Element may be off-screen; bring it into view and retry.
            driver.execute_script("arguments[0].scrollIntoView();", node)
            try:
                node.click()
            except Exception:
                driver.execute_script("arguments[0].click();", node)
    if sleep:
        time.sleep(sleep)


def loginWeb(driver):
    """Log into the site using the module-level gaccount/gpassword."""
    login_link = findByXpath(driver, "//a[contains(text(), 'Log in')]")[0]
    moveClick(driver, login_link)
    time.sleep(3)
    # Fill the credential form and submit.
    findById(driver, "edit-name").send_keys(gaccount)
    findById(driver, "edit-pass").send_keys(gpassword)
    findById(driver, "edit-submit--2").click()
    time.sleep(3)


def getTaskUrl(driver, taskUrls):
    """Harvest title -> URL pairs from the listing page currently loaded.

    Updates *taskUrls* in place, persists the cache to gfj, and returns
    [count of newly seen titles, taskUrls].
    """
    newTasks = 0
    for heading in findByXpath(driver, "//h2[@class='box-teaser__title performance_tasks']"):
        anchor = heading.find_elements(By.XPATH, "./a")[0]
        title = anchor.text
        href = anchor.get_attribute("href")
        if title not in taskUrls:
            taskUrls[title] = [href]
            newTasks += 1
        elif href not in taskUrls[title]:
            # Same title, different page: record the extra URL.
            taskUrls[title].append(href)
    print(f"got new tasks: {newTasks}, all tasks: {len(taskUrls)}")
    with open(gfj, "w", encoding='utf-8') as fw:
        json.dump(taskUrls, fw, indent=2, ensure_ascii=False)
    return [newTasks, taskUrls]


def getTaskUrls(driver):
    """Page through the listing via 'Load more' until gTaskLimit titles are cached.

    Loads the JSON cache (discarding it if corrupt), then repeatedly clicks
    the 'Load more' button and scrapes each batch with getTaskUrl().

    Fixes over the original:
    - breaks out cleanly when the 'Load more' button disappears
      (previously an IndexError on the empty element list);
    - breaks out when a retry still yields no new tasks
      (previously looped forever below gTaskLimit).
    """
    taskUrls = {}
    if os.path.exists(gfj):
        try:
            with open(gfj, "r", encoding='utf-8') as fr:
                taskUrls = json.load(fr)
        except Exception as e:
            # Corrupt cache: report and rebuild from scratch.
            print(e)
            os.remove(gfj)

    driver.get(baseUrl)

    newTasks, taskUrls = getTaskUrl(driver, taskUrls)
    time.sleep(3)
    while len(taskUrls) < gTaskLimit:
        more = findByXpath(driver, "//a[contains(text(), 'Load more')]")
        if not more:  # no pager left — nothing more to load
            break
        moveClick(driver, more[0])
        time.sleep(3)
        newTasks, taskUrls = getTaskUrl(driver, taskUrls)
        if newTasks == 0:
            # One retry with a longer wait in case the page loaded slowly.
            more = findByXpath(driver, "//a[contains(text(), 'Load more')]")
            if not more:
                break
            moveClick(driver, more[0])
            time.sleep(5)
            newTasks, taskUrls = getTaskUrl(driver, taskUrls)
            if newTasks == 0:
                # Still nothing new: stop instead of spinning forever.
                break

        print(f"got new tasks: {newTasks}, all tasks: {len(taskUrls)}")


    print(f"total tasks: {len(taskUrls)}")


def checkUrls():
    """Print every cached task title that maps to more than one detail URL.

    Fix over the original: the cache file is opened with a context manager
    so the handle is closed deterministically instead of leaking.
    """
    with open(gfj, "r", encoding='utf-8') as fr:
        taskUrls = json.load(fr)
    for task in taskUrls:
        if len(taskUrls[task]) > 1:
            print(f"{task}: {len(taskUrls[task])}")

def downloadByUrl(url, file):
    """Download *url* to *file*, skipping suspiciously small responses.

    Bodies of 512 bytes or fewer are treated as error pages and not
    written; a message is printed instead.

    Fix over the original: the HTTP request carries a timeout so a dead
    server cannot hang the crawl indefinitely.
    """
    print(f"downloading {file} from {url}")
    content = requests.get(url, timeout=60).content
    if len(content) > 512:
        with open(file, 'wb') as f:
            f.write(content)
    else:
        # Message means "download failed" (body too small to be a real file).
        print(f"下载失败 {len(content)}，{file}, {url}")


def getTaskDetail(driver):
    """Scrape the task detail page currently loaded in *driver*.

    Returns a dict with keys:
      'items'     — metadata row texts (trailing RATE widget dropped),
      'content'   — main body text,
      'downloads' — label -> [[link text, href], ...],
      'standards' — list of standard strings,
      'abilities' — (only when present) critical-ability strings.
    """
    details = {}

    # Metadata rows; a trailing "RATE" widget is excluded when present.
    box = driver.find_elements(By.XPATH, "//div[@class='box-detail__task__items']")[0]
    item_divs = box.find_elements(By.XPATH, "./div")
    if "RATE" in item_divs[-1].text:
        item_divs = item_divs[:-1]
    details["items"] = [div.text for div in item_divs]

    details["content"] = driver.find_elements(
        By.XPATH, "//div[@class='box-detail__content']")[0].text

    # Download sections: label -> list of [link text, href] pairs.
    dldic = {}
    for section in driver.find_elements(By.XPATH, "//div[@class='box-download']"):
        label = section.find_elements(By.XPATH, "./label")[0].text
        entries = []
        for li in section.find_elements(By.XPATH, "./div/div/ul/li"):
            anchor = li.find_elements(By.XPATH, "./span/a")[0]
            entries.append([anchor.text, anchor.get_attribute("href")])
        dldic[label] = entries
    details["downloads"] = dldic

    # Standards block (assumed always present on detail pages).
    std_div = driver.find_elements(
        By.XPATH, "//div[@class='box-detail-standards box-detail__standards']")[0]
    # Original fetched (and ignored) the label text; kept for identical driver traffic.
    std_div.find_elements(By.XPATH, "./label")[0].text
    standards = []
    for span in std_div.find_elements(By.XPATH, "./span"):
        standards.extend(li.text for li in span.find_elements(By.XPATH, "./div/ul/li"))
    details["standards"] = standards

    # Critical-ability block is optional.
    ability = driver.find_elements(
        By.XPATH, "//div[@class='box-detail-critical_ability box-detail__critical_ability']")
    if len(ability) > 0:
        details["abilities"] = [span.text for span in ability[0].find_elements(By.XPATH, "./span")]

    return details


def getTaskDetails(driver):
    """Visit every cached task URL, snapshot its HTML and scrape details.

    Results accumulate in taskDetails.json so interrupted runs can resume;
    pages already saved under ./html are re-parsed from disk instead of
    being fetched again.

    Fixes over the original:
    - JSON files are read with context managers (handles were leaked);
    - the HTML file handle no longer clobbers the filename variable ``f``;
    - the per-task log no longer references the inner loop index, which
      raised NameError for a task with an empty URL list.
    """
    if not os.path.exists(gfj):
        getTaskUrls(driver)
    with open(gfj, "r", encoding='utf-8') as fr:
        taskUrls = json.load(fr)
    driver.get(baseUrl)
    # Automatic login is flaky on this site; log in manually in the browser.
    time.sleep(30)
    taskDetails = {}
    fdj = "taskDetails.json"
    if os.path.exists(fdj):
        with open(fdj, "r", encoding='utf-8') as fr:
            taskDetails = json.load(fr)
    dirhtml = "html"
    os.makedirs(dirhtml, exist_ok=True)

    for t, task in enumerate(taskUrls):
        if task in taskDetails:
            continue
        taskDetails[task] = []
        print(f"getting {t+1} {task} details")
        for i, url in enumerate(taskUrls[task]):
            # Filesystem-safe name for the cached HTML snapshot.
            safe = task.replace("/", "_").replace("?", "").replace(":", "").replace("-", "_").replace("\\", "_").replace('"', "").replace("'", "")
            htmlfile = os.path.join(dirhtml, f"{safe}_{i}.html")
            if not os.path.exists(htmlfile):
                driver.get(url)
                time.sleep(3)
                with open(htmlfile, "w", encoding="utf-8") as fw:
                    fw.write(driver.page_source)
            else:
                # Re-parse the saved snapshot instead of hitting the site again.
                driver.get("file:///" + os.path.abspath(htmlfile))
            taskDetails[task].append(getTaskDetail(driver))
            time.sleep(3)

        # Persist after every task so a crash loses at most one task.
        with open(fdj, "w", encoding='utf-8') as fw:
            json.dump(taskDetails, fw, indent=2, ensure_ascii=False)
        print(f"{len(taskUrls[task])} {task} got details")
    print(f"total tasks: {len(taskDetails)}")


def getDownloads(dedic, taskdic):
    """Append one newline-joined cell per download section of *dedic*.

    Every downloads label except COMPLETE RESOURCE BUNDLE entries becomes
    a column of *taskdic*; the appended cell joins each [text, href] pair
    (and the pairs themselves) with newlines.
    """
    for label, entries in dedic["downloads"].items():
        if "COMPLETE RESOURCE BUNDLE" in label:
            continue
        cell = "\n".join("\n".join(entry) for entry in entries)
        taskdic.setdefault(label, []).append(cell)

def taskToExcel(fe = "performAssessTasks.xlsx"):
    """Flatten taskDetails.json into a spreadsheet, one row per task variant.

    Fixes over the original:
    - the JSON file is closed via a context manager;
    - metadata values containing embedded newlines no longer raise
      (split("\n") -> split("\n", 1));
    - a column first seen partway through the run is pre-padded so its
      value lands on the current row instead of being appended at row 0
      and padded after (which misaligned the whole column).
    """
    fdj = "taskDetails.json"
    with open(fdj, "r", encoding='utf-8') as fr:
        details = json.load(fr)
    # Columns expected in the output, in order.
    keys = ['task', 'content', 'TYPE OF TASK', 'SUBJECT', 'GRADE LEVEL', 'GRADE LEVEL SPAN', 'DURATION', 'STUDENT COLLABORATION', 'SOURCE', 'AUTHOR', 'standards', 'abilities', 'Rating', 'DOWNLOAD RESOURCE', 'SCORING GUIDES/RUBRICS', 'STUDENT WORK SAMPLES', 'BENCHMARK SAMPLES', 'COURSE']
    taskdic = {x: [] for x in keys}
    curLen = 0
    for task in details:
        detail = details[task]
        for i, d in enumerate(detail):
            task1 = task
            if len(detail) > 1:
                # Disambiguate multiple detail pages for the same title.
                task1 = task + "_其" + str(i+1)
            taskdic["task"].append(task1)
            curLen = len(taskdic["task"])
            items = d["items"]
            for item in items[:-1]:
                # First line is the column title, the remainder the value.
                title, content = item.split("\n", 1)
                if title not in taskdic:
                    # Pre-pad a brand-new column so this value aligns with
                    # the current row.
                    taskdic[title] = [""] * (curLen - 1)
                taskdic[title].append(content)
            taskdic["Rating"].append(items[-1])
            taskdic["content"].append(d["content"])
            # Download handling inlined (instead of getDownloads) so new
            # download columns are pre-padded for row alignment as well.
            for label, dls in d["downloads"].items():
                if "COMPLETE RESOURCE BUNDLE" in label:
                    continue
                if label not in taskdic:
                    taskdic[label] = [""] * (curLen - 1)
                taskdic[label].append("\n".join("\n".join(dl) for dl in dls))
            if "standards" in d:
                taskdic["standards"].append("\n".join(d["standards"]))
            if "abilities" in d:
                taskdic["abilities"].append("\n".join(d["abilities"]))
            # Bring every remaining column up to the current row count.
            for key in taskdic:
                if len(taskdic[key]) < curLen:
                    taskdic[key].append("")

    print(f"total tasks: {len(taskdic['task'])}, keys: {taskdic.keys()}")

    df = pd.DataFrame(taskdic)
    df.to_excel(fe, index=False)

def normalizeName(name1):
    """Make *name1* safe for use as a file/directory name.

    Path separators become underscores; other characters that are illegal
    on Windows ('?', ':', quotes) are removed.
    """
    table = str.maketrans({"/": "_", "?": None, ":": None,
                           "\\": "_", '"': None, "'": None})
    return name1.translate(table)

def taskDownloads():
    """Download every attachment referenced in taskDetails.json.

    Files land under .\\downloads\\<task>\\<section>\\<title>.<ext> and
    already-existing files are skipped, so the run can resume.

    Fixes over the original:
    - the JSON file is closed via a context manager;
    - the per-task summary no longer references ``task1``, which raised
      NameError when a task had an empty detail list.
    """
    fdj = "taskDetails.json"
    with open(fdj, "r", encoding='utf-8') as fr:
        details = json.load(fr)
    root = ".\\downloads"
    os.makedirs(root, exist_ok=True)
    dln = 0       # attachments seen
    successn = 0  # attachments downloaded without an exception
    for t, task in enumerate(details):
        print(f"downloading task {t+1} {task}")
        detail = details[task]
        for i, d in enumerate(detail):
            task1 = task
            if len(detail) > 1:
                # Disambiguate multiple detail pages for the same title.
                task1 = task + "_其" + str(i+1)
            taskDir = os.path.join(root, normalizeName(task1))
            downloads = d["downloads"]
            for download in downloads:
                # The full bundle duplicates the individual files; skip it.
                if "COMPLETE RESOURCE BUNDLE" in download:
                    continue
                downloadDir = os.path.join(taskDir, normalizeName(download))
                os.makedirs(downloadDir, exist_ok=True)
                for title, url in downloads[download]:
                    # NOTE(review): assumes the URL ends in a plain extension;
                    # a query string would leak into the suffix — verify.
                    suffix = url.split(".")[-1]
                    f1 = os.path.join(downloadDir, normalizeName(title)) + "." + suffix
                    if not os.path.exists(f1):
                        try:
                            downloadByUrl(url, f1)
                            successn += 1
                        except Exception as e:
                            print(e)
                    dln += 1
        print(f"task {task} downloads: {successn}/{dln}")
    print(f"total downloads: {successn}/{dln}")


def main():
    """Entry point: uncomment the pipeline stage you want to run."""
    # driver = getDriver(gchromePath)   # launch the browser
    # driver.maximize_window()          # maximize the window
    # getTaskUrls(driver)               # collect task URLs
    # getTaskDetails(driver)            # scrape task details
    # taskToExcel()                     # export details to Excel
    taskDownloads()

if __name__ == "__main__":
    main()
