#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2016-06-29 19:05:15
# Project: angel_pages

from pyspider.libs.base_handler import *
import re
from pymongo import MongoClient
# NOTE(review): module-level Mongo connection — opened as a side effect at
# import time; host/port are hard-coded and should come from configuration.
DB = MongoClient('192.168.0.220', 27017)['angel_log']

class Handler(BaseHandler):
    """pyspider crawler for angel.co.

    Starting from one seed profile, follows every top-level profile link
    (``https://angel.co/<slug>``) and extracts structured data from company
    and person pages via the ``company`` / ``person`` parsers defined below.
    """

    # NOTE(review): this cookie embeds a real authenticated session and
    # personal data — it should be moved out of source control and injected
    # from configuration instead of being hard-coded here.
    crawl_config = {
        'headers': {
            'User-Agent': 'GoogleBot',
        },
        'cookies': {'Cookie': '__utmt=1; ajs_group_id=null; ajs_user_id=%222375006%22; ajs_anonymous_id=%220a21f39deff67ad9bc77932f102f56ef%22; __utma=242977874.1732595352.1465890637.1467192352.1467273597.17; __utmb=242977874.2.10.1467273597; __utmc=242977874; __utmz=242977874.1467192352.16.9.utmcsr=people|utmccn=(not%20set)|utmcmd=(not%20set); __utmv=242977874.|2=created_at=20160614=1^3=investor=false=1^4=talent%20profile=false=1^5=q=2=1; mp_6a8c8224f4f542ff59bd0e2312892d36_mixpanel=%7B%22distinct_id%22%3A%20%221554de40451816-0bc483021bf773-3e70055f-1fa400-1554de404528f5%22%2C%22utm_source%22%3A%20%22redirect%22%2C%22utm_campaign%22%3A%20%22angellist_com%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%2C%22%24search_engine%22%3A%20%22google%22%2C%22__mps%22%3A%20%7B%7D%2C%22__mpso%22%3A%20%7B%7D%2C%22__mpa%22%3A%20%7B%7D%2C%22__mpu%22%3A%20%7B%7D%2C%22__mpap%22%3A%20%5B%5D%2C%22__alias%22%3A%20%222375006%22%2C%22%24email%22%3A%20%22326737833%40qq.com%22%2C%22%24name%22%3A%20%22Chen%20Kong%22%2C%22%24username%22%3A%20%22chen-kong%22%2C%22angel%22%3A%20%22false%22%2C%22candidate%22%3A%20%22false%22%2C%22roles%22%3A%20%5B%0A%20%20%20%20%22designer%22%2C%0A%20%20%20%20%22software%20engineer%22%0A%5D%2C%22quality_ceiling%22%3A%20%222%22%7D; _angellist=0a21f39deff67ad9bc77932f102f56ef; mp_mixpanel__c=1; mp_mixpanel__c3=1227; mp_mixpanel__c4=1227; mp_mixpanel__c5=5'}
    }

    # Back-off schedule in seconds, keyed by retry count ('' is the
    # catch-all for retries beyond 3).
    retry_delay = {
        0: 30,
        1: 1*60*60,
        2: 6*60*60,
        3: 12*60*60,
        '': 18*60*60
    }

    def on_start(self):
        """Seed the crawl with a single well-known company profile."""
        self.crawl('https://angel.co/google', callback=self.index_page, fetch_type='js')

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        """Queue further profile links found on the page, then parse it.

        Returns the extracted dict for company (type 1) and person (type 2)
        pages; unrecognised pages are recorded in Mongo and yield None.
        """
        # Only follow top-level profile URLs. Fix: raw string with the '.'
        # escaped — the original pattern let '.' match any character.
        for each in response.doc('a[href^="http"]').items():
            if re.match(r"https://angel\.co/[\w-]*$", each.attr.href, re.U):
                self.crawl(each.attr.href, callback=self.index_page, fetch_type='js')

        page_kind = CHECK_PAGE(response.text)  # hoisted: classify once
        if page_kind == 1:
            ret_dic = company(response.text, response.url).get_data()
            ret_dic['angel_url'] = response.url
            ret_dic['angel_type'] = 'company'
            return ret_dic
        elif page_kind == 2:
            ret_dic = person(response.text, response.url).get_data()
            ret_dic['angel_url'] = response.url
            ret_dic['angel_type'] = 'person'
            return ret_dic
        else:
            # Fix: insert_one() requires a document (mapping), not a bare
            # string — the original raised TypeError and dropped the record.
            DB['NO_Entity'].insert_one({'url': response.url})
            

    

    
# my work-----------
    
import requests
import logging
from bs4 import BeautifulSoup
import json

class FIND_SOUP():
    """Null-safe wrapper around a BeautifulSoup node.

    Every accessor tolerates a missing (None) underlying node so that
    lookups can be chained (``x.find('a').find('b')``) without raising
    on absent elements.
    """

    def __init__(self, soup):
        # soup: a bs4 Tag/NavigableString, or None for a missing node.
        self.soup = soup
        if soup:
            if soup.string:
                self.string = soup.string.strip()
            else:
                self.string = None
        else:
            self.string = None

    def find(self, *args, **kw):
        """Return the first matching child wrapped in FIND_SOUP.

        Fix: guard the empty-node case — the original called
        ``None.find(...)`` and raised AttributeError whenever a chained
        lookup followed a missing element.
        """
        if not self.soup:
            return FIND_SOUP(None)
        ret = self.soup.find(*args, **kw)
        if ret:
            return FIND_SOUP(ret)
        return FIND_SOUP(None)

    def find_all(self, *args, **kw):
        """Return the raw bs4 result list; [] for a missing node (fix: the
        original crashed with AttributeError when soup was None)."""
        if not self.soup:
            return []
        ret = self.soup.find_all(*args, **kw)
        return ret

    def get_text(self):
        """Stripped text content, or None for a missing node."""
        if self.soup:
            return self.soup.get_text().strip()
        return None

    def get(self, *args, **kw):
        """Attribute lookup on the node, or None for a missing node."""
        if self.soup:
            return self.soup.get(*args, **kw)
        return None

class base:
    """Shared behaviour for scraped entities: expose ``ret_dict`` with all
    empty/falsy entries stripped out."""

    @staticmethod
    def del_none(dic):
        """Remove every falsy-valued entry from *dic*, in place."""
        for dead_key in [key for key, value in dic.items() if not value or value == '']:
            del dic[dead_key]

    def get_data(self):
        """Return ``self.ret_dict`` after purging falsy entries."""
        base.del_none(self.ret_dict)
        return self.ret_dict

class company(base):
    """Parser for an angel.co company profile page.

    Builds ``self.ret_dict`` from the page sections; any parsing failure is
    logged and the URL is recorded in the FAILE_COMPANY collection.
    """

    def __init__(self, html, url):
        soup = BeautifulSoup(html, 'lxml')
        self.url = url
        try:
            self.ret_dict = {}
            self.ret_dict.update(self.get_summary(soup))
            self.ret_dict.update(self.get_product(soup))
            self.ret_dict.update(self.get_action(soup))
            self.ret_dict.update(self.get_portfolio(soup))
            self.ret_dict.update(self.get_founders(soup))
        except Exception as e:
            logging.exception(e)
            # Fix: insert_one() requires a document (mapping), not a bare
            # string — the original raised TypeError inside the handler.
            DB['FAILE_COMPANY'].insert_one({'url': url})

    def get_summary(self, soup):
        """Extract name, tags and status from the summary panel."""
        summary_soup = soup.find('div', class_='summary')
        if not summary_soup:
            return {}
        summary_soup = FIND_SOUP(summary_soup)
        data_dic = {}
        data_dic['is_exit'] = summary_soup.find('div', 'copy').string
        data_dic['page_name'] = summary_soup.find('h1', 'name').string
        data_dic['acquired'] = summary_soup.find('div', 'acquired').string
        data_dic['describe'] = summary_soup.find('h2', 'high_concept').string
        data_dic['tags'] = [FIND_SOUP(node).string.strip() for node in summary_soup.find('div', 'tags').find_all('a')]
        return data_dic

    def get_product(self, soup):
        """Extract the embedded startup JSON blob from the main pane."""
        main_soup = soup.find('div', class_='panes_container')
        if not main_soup:
            return {}
        main_soup = FIND_SOUP(main_soup)
        data = main_soup.find('div', class_='startups-show-sections').get('data-startup')
        # Fix: guard missing attribute — json.loads(None) raised TypeError
        # (get_portfolio already had this check; made consistent).
        if not data:
            return {}
        dic = json.loads(data)
        return dic

    def get_portfolio(self, soup):
        """Extract the roles/portfolio JSON blob, if present."""
        main_soup = soup.find('div', class_='panes_container')
        if not main_soup:
            return {}
        main_soup = FIND_SOUP(main_soup)
        data = main_soup.find('div', class_='startup_roles').get('data-roles')
        if not data:
            return {}
        dic = json.loads(data)
        return {'portfolio': dic}

    def get_founders(self, soup):
        """Extract founder entries (name, title, bio + embedded JSON)."""
        # Fix: guard the outer container — the original chained .find() on a
        # possibly-None result and raised AttributeError when the pane was
        # missing, unlike every other section parser here.
        container = soup.find('div', class_='panes_container')
        main_soup = container.find('div', class_='founders') if container else None
        if not main_soup:
            return {}
        main_soup = FIND_SOUP(main_soup)
        ret_list = []
        for each in main_soup.find_all('li', class_='role'):
            each = FIND_SOUP(each)
            data_json = each.find('div').get('data-startup_role')
            if data_json:
                data_dic = json.loads(data_json)
                data_dic['name'] = each.find('div', class_='name').find('a').string
                data_dic['role_title'] = each.find('div', class_='role_title').get_text()
                data_dic['bio'] = each.find('div', class_='bio').get_text()
                ret_list.append(data_dic)
        return {'founders': ret_list}

    def get_action(self, soup):
        """Extract social/web links and follower count from the actions bar."""
        summary_soup = soup.find('div', class_='actions')
        if not summary_soup:
            return {}
        summary_soup = FIND_SOUP(summary_soup)
        ret_dict = {}
        ret_dict['twitter_url'] = summary_soup.find('a', 'twitter_url').get('href')
        ret_dict['facebook_url'] = summary_soup.find('a', 'facebook_url').get('href')
        ret_dict['linkedin_url'] = summary_soup.find('a', 'linkedin_url').get('href')
        ret_dict['blog_url'] = summary_soup.find('a', 'blog_url').get('href')
        ret_dict['company_url'] = summary_soup.find('a', 'company_url').get('href')

        ret_dict['followers_count'] = summary_soup.find('div', 'count').string
        return ret_dict


class person(base):
    """Parser for an angel.co person profile page.

    Builds ``self.ret_dict`` from the prefix header; parsing failures are
    logged and the URL is recorded in the FAILE_PERSON collection.
    """

    def __init__(self, html, url):
        self.soup = BeautifulSoup(html, 'lxml')
        self.url = url
        try:
            self.ret_dict = {}
            self.ret_dict.update(self.get_prefix())
        except Exception as e:
            logging.exception(e)
            # Fix: insert_one() requires a document (mapping), not a bare
            # string — the original raised TypeError inside the handler.
            DB['FAILE_PERSON'].insert_one({'url': url})

    def get_prefix(self):
        """Extract name, bio, tags and external links from the page header."""
        prefix_soup = self.soup.find('div', 'prefix')
        if not prefix_soup:
            return {}
        prefix_soup = FIND_SOUP(prefix_soup)
        data_dic = {}
        data_dic['name'] = prefix_soup.find('h1', 'js-name').get_text()
        data_dic['bio'] = prefix_soup.find('h2').get_text()
        data_dic['tags'] = [FIND_SOUP(each).get('title') for each in prefix_soup.find('div', 'tags').find_all('span', 'tag')]

        # Each link span carries the target field name in data-field.
        for each in prefix_soup.find('div', 'links').find_all('span', 'link'):
            data_dic[each.find('a').get('data-field')] = each.find('a').get('href')
        return data_dic


def CHECK_PAGE(html):
    """Classify a fetched angel.co page by marker strings.

    Returns 1 for a company page, 2 for a person/investor page,
    0 for anything unrecognised.
    """
    if 'Followers' in html and 'Activity' in html and 'Overview' in html:
        return 1
    # Fix: removed unreachable `person(html)` call that followed `return 2`
    # (dead code, and a wrong-arity call had it ever executed).
    if 'Investments' in html:
        return 2
    return 0