from urllib.request import (urlopen, Request)
import aiohttp
import random
import re
from bs4 import BeautifulSoup
import asyncio


class Searcher:
    """Search the web for a company's website and its contact e-mail addresses.

    Results and errors are reported through the Qt-style ``task`` object's
    signals (``signal_progress``, ``signal_dict``, ``signal_exception``).
    """

    # Search engine used for the queries (Google is not reachable here).
    # search_host = "http://ggg363.firstguo.com"
    search_host = "http://cn.bing.com"

    # Matches a complete e-mail address (raw string: avoids the invalid
    # escape-sequence DeprecationWarning for "\w" in a plain string).
    EMAIL_RE = r"^(\w)+(\.\w+)*@(\w)+((\.\w+)+)$"
    # Extracts the address part from a "mailto:" href.
    MAIL_TO_RE = r"mailto:([^?/]*)"

    def __init__(self, task=None, count=0, host="http://cn.bing.com"):
        """Create a searcher.

        :param task: object exposing ``signal_progress`` / ``signal_dict`` /
            ``signal_exception``; may be None when ``search`` is never called.
        :param count: total number of rows, forwarded with progress signals.
        :param host: search-engine host to query.
        """
        self.task = task
        self.total = count
        self.search_host = host

    def search(self, row):
        """Look up website and e-mails for one row (synchronous entry point)."""
        loop = asyncio.new_event_loop()
        try:
            result = loop.run_until_complete(self.query_website(row))
            if result:
                self.task.signal_progress.emit(result["id"], self.total, None)
                self.task.signal_dict.emit(result)
        except Exception as e:
            self.task.signal_exception.emit(e)
        finally:
            # Always release the event loop; the original leaked it on the
            # success path and only called the no-op stop() on failure.
            loop.close()

    async def query_website(self, data):
        """Find the most likely website for ``data["name"]``.

        Returns ``{"id", "site", "emails"}``; ``emails`` is padded with empty
        strings so callers can always index at least three entries.
        """
        cmp_name = str(data["name"]).lower()
        bs4 = await self.async_search(self.strip(cmp_name, False))
        cites = bs4.find_all('cite')
        site = def_site = None
        # Loop-invariant: compare against the space-free company name.
        cmp_name = cmp_name.replace(" ", "")
        for cite in cites:
            # Prefer a result whose host name overlaps the company name.
            name = self.strip(self.get_host_name(cite.text))
            if name in cmp_name or cmp_name in name:
                site = cite.text
                break
            # Otherwise remember the first bare "http(s)://host/" style URL.
            if not def_site:
                mat = re.match(r'^(http(s)?://)?[^/\\›]*/?$', cite.text, re.I)
                if mat:
                    def_site = mat.string
        if not site:
            site = def_site
        print("cmp_name: %s, website: %s" % (cmp_name, site))
        emails = []
        if site:
            emails = await self.query_email(site)
        emails = emails + [''] * 3
        return {"id": data["id"], "site": site, "emails": emails}

    async def query_email(self, url):
        """Search for e-mail addresses belonging to ``url``'s domain."""
        origin_url = url
        # Strip an optional scheme first; the original left "http://" in
        # place, so the '/' cut below reduced such URLs to "http:".
        url = re.sub(r'^https?://', '', url, flags=re.I)
        # Reduce to the bare domain, e.g. "www.a.com/x" -> "a.com".
        if "www." in url:
            url = url[url.index('www.') + 4:]
        if '/' in url:
            url = url[0:url.index('/')]
        url = '@%s' % url
        bs4 = await self.async_search(url)
        # re.escape keeps the dots in the domain literal; the original
        # pattern let "." match any character.
        emails = re.findall(r"\w+%s" % re.escape(url), bs4.get_text(), re.I)
        if emails:
            emails = list(set(emails))
        if len(emails) < 3:
            emails.extend(await self.find_email_from_site(origin_url))
        emails = list(set(emails))
        print(emails)
        return emails

    async def find_email_from_site(self, url):
        """Crawl "contact"/"about" pages of ``url`` looking for addresses."""
        if "http" not in url:
            url = "http://%s" % url
        emails = []
        try:
            bs4 = await self.async_wget(url)
            links = bs4.find_all(name="a", text=re.compile("contact|about", re.I))
            if links:
                links = list(set([link["href"] for link in links]))
                for link in links:
                    mail_to = re.match(self.MAIL_TO_RE, link)
                    if mail_to:
                        emails.append(mail_to.group(1))
                        continue
                    if "http" not in link:
                        link = url + "/" + link
                    print(link)
                    bs4 = await self.async_wget(link)
                    emails.extend(bs4.find_all(text=re.compile(self.EMAIL_RE)))
        except Exception as e:
            # Best effort: any network/parse failure yields no extra emails.
            print(e)
        return emails

    def get_host_name(self, url):
        """Return the second label of the host, e.g. "www.foo.com" -> "foo"."""
        if '.' in url:
            url = url[(url.index('.') + 1):]
        if '.' in url:
            url = url[0:url.index('.')]
        return url

    def sync_search(self, key):
        """Blocking search for ``key`` (urllib variant of async_search)."""
        url = '%s/search?hl=en&q=%s' % (self.search_host, key)
        req = Request(url)
        # Randomized UA string to avoid trivial bot blocking.
        req.add_header("user-agent", str(random.randint(0, 1000000)))
        resp = urlopen(req)
        return BeautifulSoup(resp.read(), "lxml")

    async def async_search(self, key):
        """Run a search-engine query for ``key`` and return the parsed page."""
        print("search: %s" % key)
        url = '%s/search?q=%s' % (self.search_host, key)
        print(url)
        return await self.async_wget(url)

    async def async_wget(self, url):
        """Fetch ``url`` and return it parsed as a BeautifulSoup document."""
        async with aiohttp.ClientSession() as session:
            headers = {
                "user-agent": str(random.randint(0, 1000000))}
            async with session.get(url, headers=headers, timeout=10) as resp:
                print("http-status: %d" % resp.status)
                # Feed raw bytes to BeautifulSoup so it can detect the page
                # encoding; str() on bytes produced a mangled "b'...'" repr.
                return BeautifulSoup(await resp.read(), "lxml")

    async def check_network_status(self):
        """Return the HTTP status of a plain request to the search host."""
        url = self.search_host
        async with aiohttp.ClientSession() as session:
            headers = {"user-agent": str(random.randint(0, 1000000))}
            async with session.get(url, headers=headers) as resp:
                return resp.status

    def strip(self, key, strip_useless=False):
        r"""Normalize ``key``: punctuation and digits become spaces; when
        ``strip_useless`` is true also drop corporate suffixes (ltd, inc,
        llc, ...). Returns the lower-cased result."""
        key = re.sub(r'[\'.,"_+\d&\-()/]+', ' ', key, 0, re.I)
        if strip_useless:
            key = re.sub(r'limited|c o|cropo?r?a?t?i?o?n?|import?|export?', '', key, 0, re.I)
            key = re.sub(r'crop|inc|llc|(co[\s]*)?ltd', '', key, 0, re.I)
        return key.lower()


if __name__ == "__main__":
    # asyncio.run creates, runs and closes its own event loop;
    # asyncio.get_event_loop() is deprecated for this use since Python 3.10.
    asyncio.run(Searcher().query_website({"id": 1, "name": "design inc"}))
