# -*- coding:utf-8 -*-
# -- author: jingfeng
# time: 2018/12/8



from lxml import etree
import requests
import queue
import re
import threading
import time

# Global dedup state shared by all crawler threads.
# NOTE(review): plain sets mutated from multiple threads without a lock —
# confirm this is acceptable for this crawler's consistency needs.
old_email_set = set()  # e-mail addresses already printed/queued
url_set =set()  # URLs already queued for crawling

def get_every_data(html):
    """Extract in-page links from *html* and convert them to absolute URLs.

    Returns the list produced by getabsurl(), or None when the page held
    fewer than two absolute links (original ``> 1`` threshold kept for
    backward compatibility) or when *html* could not be scanned.
    """
    # Absolute http:// links found on the page; getabsurl() uses them only
    # to derive the host-name prefix.
    absolute_links = get_all_http(html)

    # get_all_http() returns None when *html* is not a string (e.g. a
    # failed fetch); the original relied on a bare except to swallow the
    # resulting TypeError from len(None).
    if absolute_links and len(absolute_links) > 1:
        return getabsurl(absolute_links, html)
    return None


def getabsurl(url, html):
    """Collect relative href targets from *html* and absolutize them.

    url  -- value handed to gethostname() to recover the host prefix
            (callers pass the list of absolute URLs found on the page).
    html -- raw HTML text scanned for href="..." attributes.

    Returns a list of URLs (each relative target prefixed with the host
    name when one could be derived), or None on an unexpected error.
    """
    try:
        href_regex = re.compile('href=\"(.*?)"')

        # Keep only relative, non-javascript targets.  The original code
        # called list.remove() twice per element while iterating a copy,
        # which raised ValueError (and aborted the whole function) for an
        # entry containing both "http://" and "javascript"; filtering with
        # a comprehension fixes that.
        targets = [
            href for href in href_regex.findall(html)
            if "http://" not in href and "javascript" not in href
        ]

        host_name = gethostname(url)
        if host_name is not None:
            # NOTE(review): no "/" is inserted between host and target, so
            # an href without a leading slash yields a malformed URL —
            # preserved as in the original implementation.
            targets = [host_name + href for href in targets]
        return targets
    except Exception as e:
        print(e)


def gethostname(httpstr):
    """Return the "http://host" prefix of the first URL inside *httpstr*.

    *httpstr* may be any object; it is stringified before matching, so a
    list of URLs works too.  Returns None when no "http://…/" pattern is
    present or when matching fails.
    """
    try:
        host_match = re.search(r"(http://.*?)/", str(httpstr))
        return host_match.group(1) if host_match else None
    except:
        return None


def get_all_http(html):
    """Find every absolute http:// URL in *html*.

    A URL is taken to end at the first double quote, '|' or '>' character
    (per the terminator character class in the pattern).  Returns a
    possibly-empty list of matches, or None when *html* cannot be scanned
    (e.g. it is not a string).
    """
    try:
        url_pattern = re.compile(r"(http://\S*?)[\"|>|]")
        return url_pattern.findall(html)
    except:
        return None


def get_data(url):
    """Fetch *url* over HTTP and return the response body as text.

    Returns None on any network/HTTP error.  A timeout is now set so that
    a dead or slow host cannot hang a crawler thread forever (the original
    request had no timeout, which blocks indefinitely).
    """
    try:
        response = requests.get(url, timeout=10)
        return response.text
    except requests.RequestException:
        # Connection/timeout/HTTP-level failures: treat the page as empty.
        return None
    except Exception:
        # Defensive: e.g. a malformed URL type.  The original swallowed
        # every exception the same way.
        return None


def get_all_email(html):
    """Extract e-mail-address-like strings from *html*.

    Returns a possibly-empty list of matches, or None when *html* cannot
    be searched (e.g. it is None after a failed fetch).

    Bug fix: the original domain character class was ``[a-zA-z]`` — the
    ``A-z`` range accidentally includes ``[ \\ ] ^ _ ` `` — corrected to
    ``[a-zA-Z]``.
    """
    try:
        email_reg = re.compile(r'([A-Z0-9a-z]+@[a-zA-Z0-9.]+\.[A-Za-z0-9]{2,4})')
        return re.findall(email_reg, html)
    except Exception:
        return None


def BFS(url, email_queue, url_queue):  # one breadth-first crawl step
    """Visit *url* once: harvest e-mail addresses and enqueue new links.

    - every previously unseen e-mail address is printed and put on
      *email_queue* (module-level ``old_email_set`` is the dedup set);
    - every previously unseen link found on the page is put on
      *url_queue* for later visits (deduped via ``url_set``).

    NOTE(review): the shared sets are mutated from multiple threads with
    no locking — preserved from the original design; confirm acceptable.
    """
    page_data = get_data(url)

    # get_all_email() returns None when the page could not be fetched;
    # the original relied on a bare except around len(None) instead.
    email_list = get_all_email(page_data)
    if email_list:
        for email in email_list:
            print(email)
            if email not in old_email_set:
                old_email_set.add(email)
                email_queue.put(email)

    # get_every_data() may also return None for an unusable page.
    urllist = get_every_data(page_data)
    if urllist:
        for myurl in urllist:
            if myurl not in url_set:
                # BUG FIX: the original never added anything to url_set,
                # so the membership check was always false and the same
                # URLs were re-queued endlessly.
                url_set.add(myurl)
                url_queue.put(myurl)


def save_email():
    """Drain the global *email_queue* into email4.txt, one address per line.

    Runs once: writes whatever is queued at call time, then returns.  It is
    started via threading.Timer(5, ...), so if it fires before any address
    has been queued it writes an empty file.
    NOTE(review): a long-running consumer with a blocking get() would be
    more robust, but that would change termination behavior — left as-is.
    """
    global email_queue

    # Context manager replaces the manual open()/close() pair so the file
    # is closed even if a write raises.
    with open('email4.txt', 'w', encoding='utf-8') as email_file:
        while not email_queue.empty():
            email_file.write(str(email_queue.get()) + '\n')
            email_file.flush()


email_queue = queue.Queue()  # queue of harvested e-mail addresses

url_queue = queue.Queue()  # queue of URLs waiting to be crawled

sem = threading.Semaphore(10)  # caps concurrent BFS_go entries at 10 (original comment claimed 100)

# Start the e-mail-saving thread after a 5-second delay.
# NOTE(review): save_email drains the queue once and exits; anything queued
# after it runs is never written to disk — confirm this is intended.
time_thread = threading.Timer(5, save_email)
time_thread.start()


def BFS_go(email_queue, url_queue):
    """Worker loop: repeatedly spawn BFS threads for queued URLs.

    Wakes every 10 seconds and, while *url_queue* is non-empty, launches
    one BFS thread per URL in batches of at most 100.  Runs forever (no
    termination condition, as in the original).
    """
    global sem
    with sem:
        while True:
            time.sleep(10)
            while not url_queue.empty():
                # BUG FIX: the original called url_queue.get() exactly 100
                # times per batch, blocking forever once the queue drained
                # mid-batch.  get_nowait() + queue.Empty ends the batch
                # cleanly instead.
                for _ in range(100):
                    try:
                        url = url_queue.get_nowait()
                    except queue.Empty:
                        break
                    threading.Thread(
                        target=BFS, args=(url, email_queue, url_queue)
                    ).start()



# Seed the crawl with one forum page, then hand control to the worker loop.
# NOTE(review): BFS_go never returns, so the main thread blocks here forever.
BFS('http://bbs.tianya.cn/post-140-393973-1.shtml', email_queue, url_queue)

BFS_go(email_queue, url_queue)
