'''
Scrape AngelList company profiles using coroutines running in a thread pool.
'''
import asyncio
import json
import logging
import os
import random
from multiprocessing.dummy import Manager, Pool

import aiohttp
import redis
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient


def get_recorder_logger(log_name):
    """Return a named logger writing to ``log/<log_name>`` and to stderr.

    The file handler records INFO and above; the stream handler records
    DEBUG and above.  Handlers are attached only on first call for a given
    name, so repeated calls return the same logger without duplicating
    output.

    :param log_name: logger name, also used as the log file name.
    :returns: a configured ``logging.Logger``.
    """
    my_logger = logging.getLogger(log_name)
    my_logger.setLevel(logging.DEBUG)
    if not my_logger.handlers:
        # FileHandler opens the file eagerly and raises FileNotFoundError
        # when the 'log' directory is missing — create it first.
        os.makedirs('log', exist_ok=True)
        # logging format
        fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        # filehandler
        fh = logging.FileHandler('log/%s' % log_name)
        fh.setFormatter(fmt)
        fh.setLevel(logging.INFO)
        my_logger.addHandler(fh)
        # StreamHandler
        sh = logging.StreamHandler()
        sh.setFormatter(fmt)
        sh.setLevel(logging.DEBUG)
        my_logger.addHandler(sh)

    return my_logger


class FIND_SOUP():
    """Null-tolerant wrapper around a BeautifulSoup node.

    Every accessor works even when the wrapped node is missing, so call
    chains like ``fs.find('div', 'name').find('a').get('href')`` never
    raise; a miss yields ``None`` (or ``[]`` for ``find_all``).

    NOTE: instances are always truthy, even when wrapping ``None`` —
    existing callers depend on that, so no ``__bool__`` is defined.
    """

    def __init__(self, soup):
        # `if soup` (not `is not None`) mirrors bs4 Tag truthiness.
        self.soup = soup
        self.string = soup.string.strip() if soup and soup.string else None

    def find(self, *args, **kw):
        """Find the first matching descendant, wrapped; never raises."""
        node = self.soup.find(*args, **kw) if self.soup else None
        # Normalise falsy hits (e.g. an empty tag) to None before wrapping.
        return FIND_SOUP(node or None)

    def find_all(self, *args, **kw):
        """Return all matching descendants as raw bs4 nodes, or []."""
        return self.soup.find_all(*args, **kw) if self.soup else []

    def find_next_sibling(self, *args, **kw):
        """Return the next matching sibling as a raw bs4 node, or None."""
        return self.soup.find_next_sibling(*args, **kw) if self.soup else None

    def get_text(self):
        """Return the stripped text content, or None if no node."""
        return self.soup.get_text().strip() if self.soup else None

    def get(self, *args, **kw):
        """Return an attribute value (e.g. 'href'), or None if no node."""
        return self.soup.get(*args, **kw) if self.soup else None


class BASE:
    """Mixin providing dict clean-up for parser classes.

    Subclasses populate ``self.ret_dict``; ``get_data`` prunes falsy
    entries in place and returns the dict.
    """

    @staticmethod
    def del_none(dic):
        """Remove every key whose value is falsy (None, '', 0, [], ...) in place."""
        for key in [k for k, v in dic.items() if not v or v == '']:
            del dic[key]

    def get_data(self):
        """Prune falsy entries from ``self.ret_dict`` and return it."""
        BASE.del_none(self.ret_dict)
        return self.ret_dict


class COMPANY(BASE):
    """Parse an AngelList company profile page into a flat data dict.

    Each ``get_*``/``find_*`` helper extracts one page section and returns
    a (possibly empty) dict; ``__init__`` merges them all into
    ``self.ret_dict`` and prunes falsy values via ``BASE.get_data``.  Any
    exception during parsing is logged and yields an empty dict.
    """

    def __init__(self, html):
        soup = BeautifulSoup(html, 'lxml')
        try:
            self.ret_dict = {}
            self.ret_dict.update(self.get_summary(soup))
            self.ret_dict.update(self.get_product(soup))
            self.ret_dict.update(self.get_action(soup))
            self.ret_dict.update(self.get_portfolio(soup))
            self.ret_dict.update(self.get_founders(soup))
            self.ret_dict.update(self.get_past_financing(soup))
            self.ret_dict.update(self.get_team(soup))
            self.ret_dict.update(self.find_static(soup))
        except Exception as e:
            # Any section blowing up voids the whole document.
            MY_LOG.exception(e)
            self.ret_dict = {}

        # Prune falsy values in place so stored documents stay compact.
        self.get_data()

    def get_summary(self, soup):
        """Extract the page header: name, exit/acquired state, blurb, tags."""
        summary_soup = soup.find('div', class_='summary')
        if not summary_soup:
            return {}
        summary_soup = FIND_SOUP(summary_soup)
        data_dic = {}
        data_dic['is_exit'] = summary_soup.find('div', 'copy').string
        data_dic['page_name'] = summary_soup.find('h1', 'name').string
        data_dic['acquired'] = summary_soup.find('div', 'acquired').string
        data_dic['describe'] = summary_soup.find('h2', 'high_concept').string
        # FIND_SOUP.string is already stripped but may be None; skip
        # text-less anchors so one empty tag cannot abort the whole parse
        # with an AttributeError (previously: `.string.strip()` crashed).
        tags_soup = summary_soup.find('div', 'tags')
        data_dic['tags'] = [FIND_SOUP(node).string
                            for node in tags_soup.find_all('a')
                            if FIND_SOUP(node).string]
        return data_dic

    def get_product(self, soup):
        """Return the startup's JSON blob embedded as `data-startup`."""
        main_soup = soup.find('div', class_='panes_container')
        if not main_soup:
            return {}
        main_soup = FIND_SOUP(main_soup)
        data = main_soup.find(
            'div', class_='startups-show-sections').get('data-startup')
        if not data:
            return {}
        dic = json.loads(data)

        return dic

    def get_portfolio(self, soup):
        """Return the `data-roles` JSON blob under key 'portfolio'."""
        main_soup = soup.find('div', class_='panes_container')
        if not main_soup:
            return {}
        main_soup = FIND_SOUP(main_soup)
        data = main_soup.find('div', class_='startup_roles').get('data-roles')
        if not data:
            return {}
        dic = json.loads(data)
        return {'portfolio': dic}

    def get_founders(self, soup):
        """Collect founder entries: embedded role JSON plus name/title/bio."""
        main_soup = soup.find('div', class_='panes_container')
        if not main_soup:
            return {}
        main_soup = FIND_SOUP(main_soup)
        main_soup = main_soup.find('div', 'founders')
        ret_list = []
        for each in main_soup.find_all('li', class_='role'):
            each = FIND_SOUP(each)
            data_json = each.find('div').get('data-startup_role')
            if data_json:
                data_dic = json.loads(data_json)
                data_dic['name'] = each.find(
                    'div', class_='name').find('a').string
                data_dic['role_title'] = each.find(
                    'div', class_='role_title').get_text()
                data_dic['bio'] = each.find('div', class_='bio').get_text()
                ret_list.append(data_dic)
        return {'founders': ret_list}

    def get_action(self, soup):
        """Extract social/blog/company links and the follower count."""
        def handel_url(url):
            # Keep only absolute http(s) URLs; everything else becomes None.
            if not url or not url.startswith('http'):
                return None
            return url

        summary_soup = soup.find('div', class_='actions')
        if not summary_soup:
            return {}
        summary_soup = FIND_SOUP(summary_soup)
        ret_dict = {}
        ret_dict['twitter_url'] = handel_url(
            summary_soup.find('a', 'twitter_url').get('href'))
        ret_dict['facebook_url'] = handel_url(
            summary_soup.find('a', 'facebook_url').get('href'))
        ret_dict['linkedin_url'] = handel_url(
            summary_soup.find('a', 'linkedin_url').get('href'))
        ret_dict['blog_url'] = handel_url(
            summary_soup.find('a', 'blog_url').get('href'))
        ret_dict['company_url'] = handel_url(
            summary_soup.find('a', 'company_url').get('href'))

        ret_dict['followers_count'] = summary_soup.find('div', 'count').string
        return ret_dict

    def _get_role(self, soup):
        """Return ([profile URLs], total count) for one people section.

        The total comes from the "view all N ..." link when present
        (presumably the count is the third word — TODO confirm against live
        markup); otherwise it falls back to the number of URLs found.
        """
        soup = FIND_SOUP(soup)
        role_list = []
        sum_role = 0
        sum_investors = soup.find('a', 'view_all').string
        if sum_investors:
            try:
                sum_role = int(sum_investors.split(' ')[2])
            except (ValueError, IndexError):
                MY_LOG.debug('err with : %s' % sum_investors)
                sum_role = 0

        # NOTE: FIND_SOUP instances are always truthy; find_all already
        # returns [] for a missing node, so this guard is belt-and-braces.
        if soup:
            for role_soup in soup.find_all('li'):
                role_soup = FIND_SOUP(role_soup)
                url = role_soup.find('div', 'name').find('a').get('href')
                if url:
                    role_list.append(url)
        sum_role = len(role_list) if sum_role == 0 else sum_role
        return role_list, sum_role

    def get_team(self, soup):
        """Collect {roles, sum} per team section (employees, advisors, ...)."""
        summary_soup = soup.find('div', 'team')
        if not summary_soup:
            return {}

        ret_dict = {}
        # Each section is an <h4 data-tips_selector="<key>_section"> followed
        # by a sibling <div> holding the people list.
        for key in ('employees', 'past_employees', 'advisors',
                    'attorneys', 'board_members'):
            section = summary_soup.find(
                'h4', attrs={'data-tips_selector': '%s_section' % key})
            if section:
                group = section.find_next_sibling('div')
                role_list, sum_role = self._get_role(group)
                ret_dict[key] = {'roles': role_list, 'sum': sum_role}
        return ret_dict

    def find_static(self, soup):
        """Extract the technology stack list under key 'stack'."""
        statck_soup = soup.find('h3', 'stack')
        if not statck_soup:
            return {}
        statck_soup = statck_soup.find_next_sibling('div')

        ret_dict = []
        # Map data-category -> human-readable tab label.
        category_temp = {}
        stack_component_tabs = statck_soup.find('div', 'stack-component-tabs')
        if stack_component_tabs:
            for span in stack_component_tabs.find_all('span'):
                category_temp[span.get('data-category')] = span.string

        for li in statck_soup.find_all('li', 'stack-component'):
            li = FIND_SOUP(li)
            ret_static = {}
            ret_static['name'] = li.find('div', 'name').get_text()
            ret_static['category'] = li.find('div', 'category').get_text()
            ret_static['show_category'] = category_temp.get(
                li.get('data-category'))
            ret_dict.append(ret_static)

        return {'stack': ret_dict}

    def get_past_financing(self, soup):
        """Extract funding rounds plus the past-investors role list."""
        summary_soup = soup.find('div', class_='past_financing')
        if not summary_soup:
            return {}
        summary_soup = FIND_SOUP(summary_soup)
        past_financing = []
        for each in summary_soup.find('ul', 'startup_rounds').find_all('li'):
            each = FIND_SOUP(each)
            data_dic = {}
            data_dic['is_exit'] = each.find('div', 'copy').string
            data_dic['type'] = each.find('div', 'type').string
            data_dic['date_display'] = each.find('div', 'date_display').string
            # The raised amount is either bare text or wrapped in an anchor.
            data_dic['raised'] = each.find('div', 'raised').string or each.find(
                'div', 'raised').find('a').string
            data_dic['participant'] = {}
            data_dic['participant']['participants'] = []
            data_dic['participant']['sum_participants'] = 0
            sum_participants = each.find('a', 'more_participants_link').string
            if sum_participants:
                try:
                    data_dic['participant']['sum_participants'] = int(
                        sum_participants.split(' ')[2])
                except (ValueError, IndexError):
                    MY_LOG.debug('err with sum_participants : %s' %
                                 sum_participants)
                    data_dic['participant']['sum_participants'] = 0

            for participant in each.find_all('div', 'participant'):
                participant = FIND_SOUP(participant)
                name_url = participant.find(
                    'div', 'name').find('a').get('href')
                if name_url:
                    data_dic['participant']['participants'].append(name_url)
            past_financing.append(data_dic)

        # Initialise up front: previously `past_investor` was only assigned
        # inside the guard below, a latent NameError.  (It never fired only
        # because FIND_SOUP instances are always truthy.)
        past_investor = {'roles': [], 'sum': 0}
        investors_soups = summary_soup.find(
            'h4', attrs={'data-tips_selector': 'past_investors_section'})
        if investors_soups:
            group = investors_soups.find_next_sibling('div')
            role_list, sum_role = self._get_role(group)
            past_investor['roles'] = role_list
            past_investor['sum'] = sum_role

        return {'past_financing': past_financing, 'past_investor': past_investor}


def check_page(html):
    # Dispatch a fetched page to the right parser based on marker strings:
    # company profiles show Followers/Activity/Overview; investor profiles
    # show Investments.
    # NOTE(review): neither `company` nor `person` is defined at module
    # level — calling this as-is raises NameError.  Presumably `company`
    # should be COMPANY and `person` a missing parser; confirm before use.
    if 'Followers'in html and 'Activity' in html and 'Overview' in html:
        company(html)
    elif 'Investments' in html:
        person(html)


def get_all_url(collection):
    """Return the set of `angel_url` values stored in *collection*.

    :param collection: a MongoDB-style collection exposing ``find``.
    :returns: a set of URL strings (may include None for docs missing the key).
    """
    cursor = collection.find({}, {'_id': 0, 'angel_url': 1})
    return {doc.get('angel_url') for doc in cursor}


async def get_page(work_urls):
    """Worker coroutine: pull AngelList URLs off *work_urls*, fetch each via
    a rotating proxy, parse with COMPANY, and insert into DATA_TARGET.

    Terminates when it dequeues the 'end_kill' sentinel, which it re-queues
    so sibling coroutines/threads also stop.  After 5 consecutive failures
    the current proxy is discarded and a new one is drawn; after 7 empty
    parses the URL is logged and skipped.

    NOTE(review): this uses pre-1.0 aiohttp APIs (ProxyConnector,
    aiohttp.Timeout, aiohttp.errors) removed from modern aiohttp — confirm
    the pinned aiohttp version before modifying.  Sessions created here are
    never explicitly closed.
    """
    ip_port = await get_ipproxy()
    conn = aiohttp.ProxyConnector(proxy="http://%s" % ip_port)
    session = aiohttp.ClientSession(connector=conn)

    while 1:
        angel_url = work_urls.get()
        if angel_url == 'end_kill':
            # Re-queue the sentinel for the other workers, then exit.
            work_urls.put('end_kill')
            break

        repeat_num = 0  # consecutive failures on the current proxy
        empty_num = 0   # parses that produced no page_name
        while True:
            repeat_num += 1
            try:
                with aiohttp.Timeout(35):
                    async with session.get(angel_url, headers=HEADERS) as r:
                        text = await r.text()
                        if r.status == 404:
                            LOG_404.info(angel_url)
                            break
                        assert r.status == 200
                        # Skip proxy-block interstitials; a real page is
                        # assumed to contain 'AngelList'.
                        if 'Your IP is blocked, kid.'not in text and 'AngelList' in text:
                            company = COMPANY(text)
                            data_dic = company.get_data()
                            data_dic['angel_url'] = angel_url
                            if not data_dic.get('page_name'):
                                # Parsed but empty — retry after a long sleep.
                                raise RuntimeError('page_name is None')
                            DATA_TARGET.insert_one(data_dic)
                            print('successfu get %s' % angel_url)
                            break
            except RuntimeError as e:
                empty_num += 1
                await asyncio.sleep(60)
            except aiohttp.errors.ClientError as e:
                PROXY_LOG.error('client error: %s' % ip_port)
                await asyncio.sleep(30)
            except aiohttp.errors.DisconnectedError as e:
                PROXY_LOG.error('disconnect error: %s' % ip_port)
                await asyncio.sleep(30)
            except asyncio.TimeoutError as e:
                TIMEOUT_LOG.error('TimeoutError : %s' % angel_url)
                await asyncio.sleep(60)
            except AssertionError as e:
                STATUS_LOG.error('%s : %s' % (angel_url, r.status))
                await asyncio.sleep(10)
            except aiohttp.errors.HttpProcessingError as e:
                PROXY_LOG.error('too much connection : %s' % (angel_url,))
                await asyncio.sleep(10)
            except Exception as e:
                # Catch-all so one bad page cannot kill the worker.
                UNKNOW_LOG.exception(e)
                UNKNOW_LOG.debug("fail: %s" % angel_url)
                await asyncio.sleep(10)

            if empty_num > 7:
                EMPTY_LOG.error('page is empty : %s' % angel_url)
                break

            if repeat_num > 5:
                # Rotate to a fresh proxy and a fresh session.
                del_ipproxy(ip_port)
                ip_port = await get_ipproxy()
                conn = aiohttp.ProxyConnector(proxy="http://%s" % ip_port)
                session = aiohttp.ClientSession(connector=conn)
                repeat_num = 0
                REPEAT_LOG.info(angel_url)


async def get_ipproxy():
    """Pick a random proxy 'ip:port' from the Redis sorted set 'ipproxy:3'.

    Sleeps 60s at a time while no proxy with score 0-20 is available.

    NOTE(review): the index drawn from the score-filtered count is used
    against the whole set with zrange — assumes all members have scores
    in 0-20; confirm the pool's scoring scheme.
    """
    while True:
        count = REDIS.zcount('ipproxy:3', 0, 20)
        if count == 0:
            print('wait 60s no proxy')
            await asyncio.sleep(60)
            continue
        # randint is inclusive on both ends; valid indexes are 0..count-1.
        # (Previously randint(0, count) could draw an out-of-range index,
        # wasting a round trip and a loop iteration.)
        my_id = random.randint(0, count - 1)
        ret = REDIS.zrange('ipproxy:3', my_id, my_id)
        if not ret:
            # Set shrank between zcount and zrange; retry.
            continue
        return ret[0].decode('utf8')


def del_ipproxy(proxy):
    """Remove a dead proxy from the Redis sorted set 'ipproxy:3'."""
    # `global` is unnecessary for a read-only reference.
    REDIS.zrem('ipproxy:3', proxy)


# Per-concern loggers; each writes to log/<name> (INFO+) and stderr (DEBUG+).
MY_LOG = get_recorder_logger('ang_log')
UNKNOW_LOG = get_recorder_logger('unknow_log')
EMPTY_LOG = get_recorder_logger('empty_log')
STATUS_LOG = get_recorder_logger('status_log')
TIMEOUT_LOG = get_recorder_logger('timeout_log')
REPEAT_LOG = get_recorder_logger('repeat_log')
PROXY_LOG = get_recorder_logger('proxy_log')
LOG_404 = get_recorder_logger('404_log')


# MongoDB: source URLs come from 'companys'; parsed detail documents are
# written to 'companys_detail'.  NOTE(review): connects at import time.
DB = MongoClient('192.168.0.220', 27017)['angellist']
DATA_SOURCE = DB['companys']
DATA_TARGET = DB['companys_detail']
# Redis holds the scored proxy pool (sorted set 'ipproxy:3').
REDIS_Pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
REDIS = redis.StrictRedis(connection_pool=REDIS_Pool)
ASYNC_NUM = 30   # coroutines per worker thread
THREAD_NUM = 2   # worker threads, each running its own event loop

# Desktop Chrome UA so the site serves the normal HTML profile pages.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36'}


def work(work_queue):
    """Thread entry point: run ASYNC_NUM get_page coroutines to completion.

    Each thread builds and installs its own event loop, since loops are
    not shared across threads.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    tasks = []
    for _ in range(ASYNC_NUM):
        tasks.append(asyncio.ensure_future(get_page(work_queue)))
    loop.run_until_complete(asyncio.wait(tasks))
    loop.close()


def main():
    """Queue every not-yet-scraped company URL and fan out worker threads.

    URLs already present in DATA_TARGET are skipped.  A single 'end_kill'
    sentinel is queued; each worker re-queues it on exit so all workers
    eventually stop.
    """
    source_urls = get_all_url(DATA_SOURCE)
    finished_urls = get_all_url(DATA_TARGET)
    pending_urls = source_urls - finished_urls

    manager = Manager()
    work_queue = manager.Queue()
    for url in pending_urls:
        work_queue.put(url)
    work_queue.put('end_kill')

    pool = Pool(THREAD_NUM)
    # One work() invocation per thread; apply_async replaces the previous
    # loop of map_async over a 1-tuple, which did the same thing obscurely.
    for _ in range(THREAD_NUM):
        pool.apply_async(work, (work_queue,))
    pool.close()
    pool.join()

    # (The original printed 'end' twice — once was enough.)
    print('end')


def test():
    """Offline smoke test: parse the cached page in file 'temp' and dump
    the extracted data as JSON to stdout."""
    with open('temp', 'rt') as fp:
        cached_html = fp.read()
    parsed = COMPANY(cached_html)
    import json
    print(json.dumps(parsed.get_data()))

# Entry point: run the full crawl; swap in test() for offline parsing.
if __name__ == '__main__':
    # test()
    main()
