import requests
from time import sleep
import os
import selenium
from selenium import webdriver

# Pipeline overview:
# 1. Read 1.txt and drop every URL that contains a blacklisted string.
# 2. For each remaining URL, extract the <a>-tag hrefs and append them to 2.txt.
# 3. quChong() removes duplicate lines from 2.txt.
# 4. classify() splits 2.txt by content:
#    - URLs containing ".edu." go to last.txt
#    - all other URLs go to maybe.txt

def step(url):
    """Visit *url* in a headless Firefox and append every absolute (http*)
    link found on the page to the file at ``path2``.

    WebDriver errors (unreachable host, bad URL, timeout, ...) are
    deliberately swallowed so one broken URL does not abort the crawl.
    """
    options = webdriver.FirefoxOptions()
    options.add_argument("--headless")
    # Fix: `firefox_options=` is deprecated since Selenium 3.8 (removed in
    # Selenium 4); the supported keyword is `options=`.
    browser = webdriver.Firefox(options=options)
    try:
        browser.get(url)
        sleep(3)  # give dynamically loaded content a moment to render
        a_tags = browser.find_elements_by_tag_name('a')
        with open(path2, 'a+') as file:
            for a in a_tags:
                href = a.get_attribute('href')
                # keep only non-empty absolute links
                if href and href.startswith('http'):
                    file.write(href + '\n')
    except selenium.common.exceptions.WebDriverException:
        pass  # best-effort: skip pages that fail to load
    finally:
        # Fix: run teardown on *every* exit path (the original leaked the
        # browser if a non-WebDriverException escaped), and use quit() —
        # close() only closes the window and leaks the driver process.
        sleep(2)
        browser.quit()

def Traverse1txt():
    """Run one crawl generation.

    Filters the URL list at ``path1`` against the blacklist, crawls every
    remaining URL with :func:`step` (which appends harvested links to
    ``path2``), then dedupes and classifies the results.
    """
    fifter_black_list(path1)
    # Fix: the original never closed the file handle.
    with open(path1, 'r') as file:
        lines = file.readlines()
    total = len(lines)  # renamed: `all` shadowed the builtin

    for i, line in enumerate(lines):
        print(f"{i / total * 100}%")  # crude progress indicator
        step(line[:-1])  # strip the trailing newline before visiting

    quChong()
    classify()

def quChong(path=None):
    """Remove duplicate lines from the file at *path* in place.

    *path* defaults to the module-level ``path2`` for backward
    compatibility.  Line order of the survivors is not preserved
    (set semantics, as in the original).
    """
    if path is None:
        path = path2
    # Fix: use context managers so handles are closed even on error.
    with open(path, 'r') as file:
        unique = set(file.readlines())
    with open(path, 'w') as file:
        file.writelines(unique)

def classify(src=None, maybe_path=None, last_path=None):
    """Split the harvested URLs in *src* into two output files.

    Lines containing ``.edu.`` are appended to *last_path*; everything
    else is appended to *maybe_path*.  The parameters default to the
    module-level ``path2`` / ``path3`` / ``path4`` for backward
    compatibility.
    """
    if src is None:
        src = path2
    if maybe_path is None:
        maybe_path = path3
    if last_path is None:
        last_path = path4

    with open(src, 'r') as file:
        lines = file.readlines()

    with open(maybe_path, 'a+') as maybe, open(last_path, 'a+') as last:
        for line in lines:
            # Fix: the original tested count('.edu.') == 1, which sent URLs
            # containing '.edu.' more than once to maybe.txt; substring
            # membership matches the documented intent.
            if '.edu.' in line:
                last.write(line)
            else:
                maybe.write(line)

def txt2_to_txt1(src=None, dst=None):
    """Promote the freshly harvested link list to be the next crawl input.

    Moves *src* (default ``path2``) over *dst* (default ``path1``).
    """
    if src is None:
        src = path2
    if dst is None:
        dst = path1
    # Fix: os.replace overwrites the destination atomically and, unlike the
    # original remove()+rename() pair, does not raise when the destination
    # file is missing.
    os.replace(src, dst)

def load_black_list(path=None):
    """Load blacklisted domains (one per line) from *path* and return the
    matcher prefixes used for filtering: ``'.domain'`` and ``'//domain'``
    for each entry.

    *path* defaults to the module-level ``path5`` for backward
    compatibility.
    """
    if path is None:
        path = path5
    with open(path, 'r') as file:
        # Fix: the original sliced off the last *character* of every line
        # ([:-1]), which corrupts the final entry when the file has no
        # trailing newline; rstrip('\n') removes only the newline.
        blacks = [line.rstrip('\n') for line in file]

    prefix = []
    for black in blacks:
        prefix.append('.' + black)
        prefix.append('//' + black)
    return prefix

def fifter_black_list(path, blacks=None):
    """Rewrite the file at *path* in place, keeping only the lines that
    contain none of the blacklist markers.

    *blacks* is an optional iterable of marker strings; when omitted it is
    loaded via :func:`load_black_list` (backward-compatible default).
    """
    if blacks is None:
        blacks = load_black_list()
    print(blacks)
    print(len(blacks))

    with open(path, 'r') as file:
        lines = file.readlines()

    with open(path, 'w') as file:
        for line in lines:
            # Fix: the original tested count(black) == 1, so a line in which
            # a marker occurred twice slipped through the filter; substring
            # membership matches the intent.
            if not any(black in line for black in blacks):
                file.write(line)

# Module-level configuration: working files on the Windows desktop.
path1 = r'C:\Users\Administrator\Desktop\1.txt'  # crawl input: one URL per line
path2 = r'C:\Users\Administrator\Desktop\2.txt'  # raw harvested links (appended by step())
path3 = r'C:\Users\Administrator\Desktop\maybe.txt'  # URLs without '.edu.'
path4 = r'C:\Users\Administrator\Desktop\last.txt'  # URLs containing '.edu.'
path5 = r'C:\Users\Administrator\Desktop\black_list.txt'  # blacklisted domains, one per line

# Entry point: run two crawl generations.  Each pass crawls the URLs in
# 1.txt, then promotes the harvested links (2.txt) to be the next input.
# NOTE(review): this runs on import as well; a `if __name__ == "__main__":`
# guard would be safer but changes import-time behavior.
for i in range(2):

    Traverse1txt()
    txt2_to_txt1()

