import requests
from bs4 import BeautifulSoup
import json
import time

if __name__ == '__main__':
    # One-time URL-discovery crawl (already executed; kept for reference).
    # It walked the index page -> second-level list pages -> third-level
    # article pages, and its json.dumps output was pasted into the
    # third_urls literal below so later runs skip the discovery passes.
    # first_url = 'https://toefl.xdf.cn/202111/11229930.html'
    # res = requests.get(first_url, timeout=30)
    # soup = BeautifulSoup(res.text, 'html.parser')
    #
    # second_urls = []
    # for a_ele in soup.select('table a'):
    #     second_urls.append(a_ele.attrs.get("href"))
    #
    # third_urls = []
    # for second_url in second_urls:
    #     res = requests.get(second_url, timeout=30)
    #     soup = BeautifulSoup(res.text, 'html.parser')
    #     select_as = soup.select('table a')
    #     while len(select_as) == 0:
    #         print("url=" + second_url + " is null res=" + res.text)
    #         res = requests.get(second_url, timeout=30)
    #         soup = BeautifulSoup(res.text, 'html.parser')
    #         select_as = soup.select('table a')
    #         continue
    #     for a_ele in select_as:
    #         third_urls.append(a_ele.attrs.get("href"))
    # print(json.dumps(third_urls))
    # Pre-crawled article URLs: the captured output of the commented-out
    # discovery code above, embedded as a JSON string literal.
    third_urls = json.loads('["http://toefl.xdf.cn/202110/11225205.html", "http://toefl.xdf.cn/202110/11225207.html", "http://toefl.xdf.cn/202110/11225208.html", "http://toefl.xdf.cn/202110/11225210.html", "http://toefl.xdf.cn/202110/11225211.html", "http://toefl.xdf.cn/202110/11225212.html", "http://toefl.xdf.cn/202110/11225213.html", "http://toefl.xdf.cn/202110/11225214.html", "http://toefl.xdf.cn/202110/11225215.html", "http://toefl.xdf.cn/202110/11225216.html", "http://toefl.xdf.cn/202110/11225217.html", "http://toefl.xdf.cn/202110/11225218.html", "http://toefl.xdf.cn/202110/11225219.html", "http://toefl.xdf.cn/202110/11225220.html", "http://toefl.xdf.cn/202110/11225222.html", "http://toefl.xdf.cn/202110/11225223.html", "http://toefl.xdf.cn/202110/11225224.html", "http://toefl.xdf.cn/202110/11225225.html", "http://toefl.xdf.cn/202110/11225226.html", "http://toefl.xdf.cn/202110/11225228.html", "http://toefl.xdf.cn/202110/11225496.html", "http://toefl.xdf.cn/202110/11225497.html", "http://toefl.xdf.cn/202110/11225498.html", "http://toefl.xdf.cn/202110/11225499.html", "http://toefl.xdf.cn/202110/11225500.html", "http://toefl.xdf.cn/202110/11225501.html", "http://toefl.xdf.cn/202110/11225502.html", "http://toefl.xdf.cn/202110/11225503.html", "http://toefl.xdf.cn/202110/11225504.html", "http://toefl.xdf.cn/202110/11225505.html", "http://toefl.xdf.cn/202110/11225507.html", "http://toefl.xdf.cn/202110/11225508.html", "http://toefl.xdf.cn/202110/11225509.html", "http://toefl.xdf.cn/202110/11225528.html", "http://toefl.xdf.cn/202110/11225529.html", "http://toefl.xdf.cn/202110/11225530.html", "http://toefl.xdf.cn/202110/11225601.html", "http://toefl.xdf.cn/202110/11225602.html", "http://toefl.xdf.cn/202110/11225603.html", "http://toefl.xdf.cn/202110/11225604.html", "http://toefl.xdf.cn/202110/11227487.html", "http://toefl.xdf.cn/202110/11227493.html", "http://toefl.xdf.cn/202110/11227495.html", "http://toefl.xdf.cn/202110/11227497.html", 
"http://toefl.xdf.cn/202110/11227498.html", "http://toefl.xdf.cn/202110/11227500.html", "http://toefl.xdf.cn/202110/11227501.html", "http://toefl.xdf.cn/202110/11227503.html", "http://toefl.xdf.cn/202110/11227504.html", "http://toefl.xdf.cn/202110/11227519.html", "http://toefl.xdf.cn/202110/11227520.html", "http://toefl.xdf.cn/202110/11227521.html", "http://toefl.xdf.cn/202110/11227523.html", "http://toefl.xdf.cn/202110/11227524.html", "http://toefl.xdf.cn/202110/11227526.html", "http://toefl.xdf.cn/202110/11227528.html", "http://toefl.xdf.cn/202110/11227529.html", "http://toefl.xdf.cn/202110/11227530.html", "http://toefl.xdf.cn/202110/11227531.html", "http://toefl.xdf.cn/202110/11227532.html", "http://toefl.xdf.cn/202110/11228650.html", "http://toefl.xdf.cn/202110/11228651.html", "http://toefl.xdf.cn/202110/11228652.html", "http://toefl.xdf.cn/202110/11228653.html", "http://toefl.xdf.cn/202110/11228654.html", "http://toefl.xdf.cn/202110/11228655.html", "http://toefl.xdf.cn/202110/11228656.html", "http://toefl.xdf.cn/202110/11228657.html", "http://toefl.xdf.cn/202110/11228658.html", "http://toefl.xdf.cn/202110/11228659.html", "http://toefl.xdf.cn/202110/11228661.html", "http://toefl.xdf.cn/202110/11228663.html", "http://toefl.xdf.cn/202110/11228664.html", "http://toefl.xdf.cn/202110/11228665.html", "http://toefl.xdf.cn/202110/11228666.html", "http://toefl.xdf.cn/202110/11228667.html", "http://toefl.xdf.cn/202110/11228668.html", "http://toefl.xdf.cn/202110/11228669.html", "http://toefl.xdf.cn/202110/11228670.html", "http://toefl.xdf.cn/202110/11228671.html", "http://toefl.xdf.cn/202111/11229813.html", "http://toefl.xdf.cn/202111/11229814.html", "http://toefl.xdf.cn/202111/11229815.html", "http://toefl.xdf.cn/202111/11229816.html", "http://toefl.xdf.cn/202111/11229817.html", "http://toefl.xdf.cn/202111/11229818.html", "http://toefl.xdf.cn/202111/11229819.html", "http://toefl.xdf.cn/202111/11229820.html", "http://toefl.xdf.cn/202111/11229821.html", 
"http://toefl.xdf.cn/202111/11229822.html", "http://toefl.xdf.cn/202111/11229823.html", "http://toefl.xdf.cn/202111/11229824.html", "http://toefl.xdf.cn/202111/11229825.html", "http://toefl.xdf.cn/202111/11229826.html", "http://toefl.xdf.cn/202111/11229827.html", "http://toefl.xdf.cn/202111/11229828.html", "http://toefl.xdf.cn/202111/11229829.html", "http://toefl.xdf.cn/202111/11229831.html", "http://toefl.xdf.cn/202111/11229832.html", "http://toefl.xdf.cn/202111/11229833.html", "http://toefl.xdf.cn/202111/11229922.html", "http://toefl.xdf.cn/202111/11229923.html", "http://toefl.xdf.cn/202111/11229924.html", "http://toefl.xdf.cn/202111/11229925.html", "http://toefl.xdf.cn/202111/11229926.html", "http://toefl.xdf.cn/202111/11229927.html", "http://toefl.xdf.cn/202111/11229928.html", "http://toefl.xdf.cn/202111/11229929.html"]')
    # Echo the URL list so a run's input set is visible in its log.
    print(json.dumps(third_urls))
    # word text -> part-of-speech class string (later rows may override, see below)
    words_dict = {}
    # index string (the word's rank in the list) -> word text; first sighting wins
    words_list = {}
    for third_url in third_urls:
        # The site intermittently serves pages without the word table, so
        # retry until rows appear -- but cap attempts so one permanently
        # broken URL cannot hang the whole run (the original looped forever).
        select_tr = []
        for _attempt in range(20):
            try:
                res = requests.get(third_url, timeout=30)
            except requests.RequestException as exc:
                # Transient network failure: log and retry instead of aborting
                # the entire crawl mid-way.
                print("url=" + third_url + " request failed: " + str(exc))
                time.sleep(1)
                continue
            soup = BeautifulSoup(res.text, 'html.parser')
            select_tr = soup.select('table tr')
            time.sleep(1)  # be polite to the server between requests
            if select_tr:
                break
        else:
            print("url=" + third_url + " gave no table rows after retries; skipped")
            continue
        for tr_ele in select_tr:
            tds = tr_ele.select("td")
            if len(tds) < 3:
                # Header (<th>) or malformed row: indexing tds[0..2] would
                # raise IndexError here in the original code.
                continue
            index = tds[0].text.replace('\n', '').replace('\t', '')
            word = tds[1].text.replace('\n', '').replace('\t', '')
            cls = tds[2].text.replace('\n', '').replace('\t', '')

            if index not in words_list:
                words_list[index] = word
            canonical = words_list[index]
            # First class seen for a word wins; afterwards only a class
            # containing "v" or "adj" overwrites it ("adv" is covered by the
            # "v" test, so a separate check is unnecessary).
            if canonical not in words_dict:
                words_dict[canonical] = cls
            elif "v" in cls or "adj" in cls:
                words_dict[canonical] = cls

    # Persist results. Explicit utf-8 so non-ASCII cell text survives
    # regardless of the platform's default locale encoding.
    with open('text/word-index.txt', 'w', encoding='utf-8') as file:
        for i in range(1, 5400):
            key = str(i)
            if key in words_list:
                file.write(key + ' ' + words_list[key] + "\n")
    with open('text/words.txt', 'w', encoding='utf-8') as file:
        for word, cls in words_dict.items():
            file.write(word + ' ' + cls + "\n")