import time
from selenium import webdriver
from lxml import etree
from Util import Util
import XPath
import re
'''

/html/body/div[1]/div[3]/div[3]/div/div[2]/div[2]/table/tbody/tr[2]/td[2]/span
/html/body/div[1]/div[3]/div[3]/div/div[2]/div[2]/table/tbody/tr[3]/td[2]/span
/html/body/div[1]/div[3]/div[3]/div/div[2]/div[2]/table/tbody/tr[4]/td[2]/span
/html/body/div[1]/div[3]/div[3]/div/div[2]/div[2]/table/tbody/tr[51]/td[2]/span

/html/body/div[1]/div[3]/div[3]/div/div[2]/div[2]/table/tbody/tr[2]/td[2]/span



第一页
/html/body/div[1]/div[3]/div[3]/div/div[2]/div[3]/ul/li[3]/a

第二页
/html/body/div[1]/div[3]/div[3]/div/div[2]/div[3]/ul/li[4]/a


'''

def save2File(filename: str, descList: list, tagList: list):
    """Write paired question descriptions and tags to a TSV-style file.

    One question per line: ``description<TAB>tags``. Tabs inside a
    description are replaced with spaces so the tab separator between
    the two columns stays unambiguous.

    :param filename: path of the output file (overwritten if it exists)
    :param descList: question description strings
    :param tagList: tag strings, parallel to ``descList``
    """
    # `with` guarantees the file is closed even if a write raises.
    with open(filename, "w", encoding="utf8") as f:
        for desc, tag in zip(descList, tagList):
            f.write(re.sub("\t", " ", desc))
            f.write("\t")
            f.write(tag)
            f.write("\n")

# Crawl one question-bank URL: collect every question's description and
# tag string across all pages, then persist them via save2File.
def crawl(browser, url: str):
    """Scrape all pages of a nowcoder question bank.

    :param browser: a Selenium WebDriver instance (shared across calls)
    :param url: question-bank URL; its last path segment names the
        output file written under ``data/``
    """
    # Output file name, e.g. "data/huawei.txt".
    file_name = "data/" + url.split('/')[-1] + ".txt"

    # Helper wrapping element lookup / tab switching for this page.
    util = Util(browser, url)

    # NOTE(review): result intentionally discarded; the call is kept in
    # case getElements performs an implicit wait for the pagination bar
    # to render — confirm before removing entirely.
    util.getElements(XPath.pagination)

    n_q = 1  # running question counter, used for progress logging only

    all_question_desc = []  # question descriptions, in page order
    all_tags = []           # space-joined tag strings, parallel to above

    while True:

        # Scroll back to the top so question links are clickable.
        browser.execute_script('window.scrollTo(0,0)')

        # Open each question in a new tab and grab its description text.
        for q in util.getElements(XPath.questions)[:]:

            print("正在点击:问题-{}".format(n_q))
            n_q += 1

            q.click()

            time.sleep(3)  # wait for the question tab to finish loading

            util.switch()  # move driver focus to the newly opened tab

            try:
                tmp = etree.HTML(util.getElement(XPath.question_desc).get_attribute("outerHTML"))
                tmp = tmp.xpath("string(.)")
                # Collapse every whitespace run into a single comma.
                tmp = re.sub(r"\s+", ",", tmp)
                all_question_desc.append(tmp)
            except Exception:
                # Question page missing or description element absent;
                # record a placeholder so lists stay aligned.
                all_question_desc.append("试题不存在")

            util.close()
            util.ret()  # switch back to the listing tab

        # Collect the tag string for every question row on this page.
        for tag_cell in util.getElements(XPath.question_tag)[:]:
            tag_elems = util.getElements2(tag_cell, XPath.tags)
            all_tags.append(" ".join(t.text for t in tag_elems))

        # The second-to-last pager item is the "next page" control; the
        # disabled class means we are already on the last page.
        next_page = util.getElements(XPath.pagination_)[-2]
        if next_page.get_attribute("class") == "txt-pager disabled":
            break
        util.getElement2(next_page, "./a").click()
        time.sleep(1)

    save2File(file_name, all_question_desc, all_tags)



####################################
### Scrape nowcoder question banks (description + tags)
### into the data/ directory.
### One txt file per bank, one question per line.
### Format: description<TAB>tag1 tag2
####################################
if __name__ == '__main__':

    # Question-bank URLs to crawl.
    urls = [
        "https://www.nowcoder.com/ta/coding-interviews",  # Sword to Offer
        "https://www.nowcoder.com/ta/huawei",  # Huawei machine tests
        "https://www.nowcoder.com/ta/job-code",  # Tiba originals
        "https://www.nowcoder.com/ta/cracking-the-coding-interview",  # Cracking the Coding Interview
        "https://www.nowcoder.com/ta/programmer-code-interview-guide",  # Programmer Code Interview Guide
        "https://www.nowcoder.com/ta/classic-code",  # classic must-do problems
        "https://www.nowcoder.com/ta/exam-all"  # top-company campus recruiting problems
    ]

    # Reuse a local Chrome profile (adjust the path for your machine).
    options = webdriver.ChromeOptions()
    options.add_argument(r'--user-data-dir=C:\Users\wyz\AppData\Local\Google\Chrome\User Data')
    # options.add_argument('--headless')

    # `chrome_options=` is deprecated (removed in Selenium 4);
    # `options=` is the supported keyword.
    browser = webdriver.Chrome(options=options)

    # NOTE(review): only the first URL is crawled ([:1]) — looks like a
    # debug leftover; drop the slice to crawl every bank.
    for url in urls[:1]:
        crawl(browser, url)

    browser.quit()
    print("\nhello")



