import requests
import tldextract
import multiprocessing
from bs4 import BeautifulSoup
from common.config import config_option


def get_url():
    """获取URL"""
    r_file = open(config_option['project_path'] + "/data/domain.txt")
    url_list = [line.strip().split("\t")[0] for line in r_file.readlines()[1:]]
    r_file.close()
    return url_list


def get_top_domain(url_list):
    """获取顶级域名"""
    top_domain_list = []
    for url in url_list:
        result = tldextract.extract(url)
        top_domain = "{0}.{1}".format(result.domain, result.suffix)
        print("顶级域名：", top_domain)
        top_domain_list.append(top_domain)

    top_domain_set = list(set(top_domain_list))
    return top_domain_set


def crawl_top_domain(num, top_domain_list):
    """抓取顶级域名内容"""
    w_file = open(config_option['project_path'] + "/output/top_content" + str(num) + ".txt", "w", encoding="utf-8")

    for url in top_domain_list:
        try:
            print("URL:", url)
            result = requests.get("http://" + url)
            print("访问状态：", result.status_code)
            bs = BeautifulSoup(result.text, 'html.parser')
            title = bs.find('title').text.strip()
            print("网页主题：", title)
            w_file.write("\t".join((url, str(result.status_code), title)) + "\n")
        except Exception as e:
            print(e)
            w_file.write("\t".join((url, "NULL", "NULL")) + "\n")

    w_file.close()


def save_content(file_path, content_list):
    """保存内容"""
    w_file = open(file_path, "w", encoding="utf-8")
    for content in content_list:
        w_file.writelines("\t".join(content) + "\n")
    w_file.close()


"""
多进程运行
"""


def multi_process_run():
    # 线程数量
    process_num = 5

    # 获取顶级域名
    url_list = get_url()
    top_domain_list = get_top_domain(url_list)
    domain_num = len(top_domain_list)
    process_domain_num = int(domain_num / process_num)

    """多进程运行"""
    for i in range(process_num):
        start_pos = i * process_domain_num
        if i != process_num - 1:
            end_pos = (i + 1) * process_domain_num
        else:
            end_pos = domain_num

        print("起始位置：", start_pos, "终止位置：", end_pos)
        p = multiprocessing.Process(target=crawl_top_domain, args=(i, top_domain_list[start_pos: end_pos]))
        p.start()


# Script entry point. The guard is required because multiprocessing
# re-imports this module in child processes on spawn-based platforms
# (Windows/macOS); without it each child would recursively spawn workers.
if __name__ == '__main__':
    multi_process_run()
