import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient
import time
from util.config import get_header,get_log
import arrow
import re

Loger = get_log('itjuzi')
# outbound HTTP proxy used by every request; the commented entry is an
# alternative LAN proxy host
PROXY = {
    # 'http':'http://192.168.1.220:8087',
    'http':'http://localhost:8087',
}
# HTTP request timeout, seconds
TIMEOUT = 15 
# how many times get_html() retries a failed request
REQ_RETYP = 3
# delay between consecutive page requests, seconds
NEXT_DAILY = 10
# re-crawl / skip horizon: records older than this many days are stale
UPDATE_DELAY = 15
# MongoDB database holding the crawl results
DB = MongoClient('192.168.1.220',29001)['itjuzi']

class Company:
    """Parser for an itjuzi.com company detail page.

    The constructor runs every section parser over the page and merges
    their dicts into ``self.sum_dic``; falsy values (None, '', [], ...)
    are dropped so only populated fields reach MongoDB.

    Raises AssertionError (via get_company_detail) when the page has no
    company name — the crawl loops treat that as a dead page.
    """

    def __init__(self, html):
        soup = BeautifulSoup(html, 'lxml')
        self.sum_dic = {}
        self.sum_dic.update(self.get_company_detail(soup))
        self.sum_dic.update(self.get_company_basic_info(soup))
        self.sum_dic.update(self.get_team(soup))
        self.sum_dic.update(self.get_product(soup))
        self.sum_dic.update(self.get_business(soup))
        # drop empty nodes so the stored document stays compact
        self.sum_dic = dict(filter(lambda x: x[1], self.sum_dic.items()))

    def get_company_detail(self, soup):
        """Parse the header box ('picinfo'): name, slogan, category,
        location, weibo/website links and the tag set."""
        sub_soup = soup.find('div', 'picinfo')
        ret_dic = {}

        name = sub_soup.find('span', 'title').get_text().strip().split('\n')[0].strip()
        # an empty name means a placeholder page; signal the caller via assert
        assert name.replace('\t', ''), 'No coms'
        ret_dic['name'] = name.replace('\t', '')

        info_lines = sub_soup.find_all('div', 'info-line')
        ret_dic['slogan'] = info_lines[0].get_text().strip()

        # the second info line holds two spans: category links, then
        # location links (the old code ran find_all('span') twice)
        spans = info_lines[1].find_all('span')
        ret_dic['category'] = [each.get_text() for each in spans[0].find_all('a')]
        ret_dic['locate'] = [each.get_text() for each in spans[1].find_all('a')]

        link_line = sub_soup.find('div', 'link-line')
        if link_line.find('a', 'link-weibo'):
            ret_dic['weibo'] = link_line.find('a', 'link-weibo').get('href')
        if link_line.find('a', 'weblink'):
            ret_dic['weblink'] = link_line.find('a', 'weblink').get('href')

        tagset = soup.find('div', 'tagset')
        ret_dic['tagset'] = [tag.get_text() for tag in tagset.find_all('a')]

        return ret_dic

    def get_company_basic_info(self, soup):
        """Parse the main body: description, labelled facts, operating
        state, funding rounds and merger/investment list."""
        sub_soup = soup.find('div', 'main')
        ret_dic = {}

        bgpink = sub_soup.find('div', 'bgpink')
        ret_dic['des'] = bgpink.find('div', 'des').get_text(strip=True)

        des_more = sub_soup.find('div', 'des-more')
        # every '<label>：<value>' heading becomes a key/value pair keyed
        # by the (Chinese) label before the full-width colon
        for h2 in des_more.find_all('h2'):
            if u'：' in h2.get_text():
                val = h2.get_text().split('：')
                ret_dic[val[0].strip()] = val[1].strip()
        spans = des_more.find_all('span')
        # BUG FIX: the old guard was ``len(...) > 0`` while indexing [-2],
        # which raised IndexError when exactly one span was present
        ret_dic['states'] = spans[-2].get_text() if len(spans) >= 2 else None

        list_round = sub_soup.find('table', 'list-round-v2')
        if list_round:
            fundinground = []
            for tr in list_round.find_all('tr'):
                tds = tr.find_all('td')
                date = tds[0].find('span', 'date').get_text().strip()
                rounds = tds[1].find('span', 'round').get_text().strip()
                finades = tds[2].find('span', 'finades').get_text().strip()
                investors = [a.get_text().strip() for a in tds[3].find_all('a')]
                fundinground.append({'date': date, 'rounds': rounds,
                                     'finades': finades, 'investors': investors})
            ret_dic['fundinground'] = fundinground

        list_round_bg = sub_soup.find('div', 'invst-data')
        if list_round_bg:
            # names of invested / merged companies
            ret_dic['merge'] = [li.find('a', 'title').get_text()
                                for li in list_round_bg.find_all('li')]
        return ret_dic

    def get_team(self, soup):
        """Parse the team-member list; returns {} when the page has none."""
        sub_soup = soup.find('div', 'institu-member')
        ret_dic = {}

        ul = sub_soup.find('ul', 'list-prodcase')
        if not ul:
            return {}
        lis = []
        for li in ul.find_all('li'):
            dic = {}
            div = li.find('div', 'right')
            # the person id is the last path segment of the profile link
            dic['pid'] = div.find('span', 'c').find_parent('a').get('href').split('/')[-1]
            dic['name'] = div.find('span', 'c').string
            dic['position'] = div.find('span', 'c-gray').string
            dic['des'] = div.find('p', 'person-des').get_text().strip()
            try:
                dic['weibo'] = div.find('i', 'fa-weibo').find_parent('a').get('href')
            except AttributeError:
                # BUG FIX: was a bare ``except:``; only the missing-icon
                # case (find() -> None) should make weibo optional
                dic['weibo'] = None
            lis.append(dic)
        ret_dic['team'] = lis
        return ret_dic

    def get_product(self, soup):
        """Parse the product list; returns {} when absent."""
        sub_soup = soup.find('div', 'main')
        ret_dic = {}

        ul = sub_soup.find('ul', 'list-prod')
        if ul:
            lis = []
            for li in ul.find_all('li'):
                dic = {}
                dic['tag'] = li.find('span', 'tag').string
                dic['name'] = li.find('b').get_text().strip()
                dic['des'] = li.find('p').string
                lis.append(dic)
            ret_dic['product'] = lis
        return ret_dic

    def get_business(self, soup):
        """Parse the business-registration block ('indus-info'): legal
        representative, registered capital, registration state, plus every
        labelled row of the text list."""
        sub_soup = soup.find('div', 'indus-info')
        ret_dic = {}
        if sub_soup:
            ret_dic['business'] = {}
            faren = sub_soup.select('table tr:nth-of-type(2) > td:nth-of-type(1)')
            zhucezijin = sub_soup.select('table tr:nth-of-type(2) > td:nth-of-type(2)')
            states = sub_soup.select('table tr:nth-of-type(2) > td:nth-of-type(5)')
            ret_dic['business']['faren'] = faren[0].get_text(strip=True)
            ret_dic['business']['zhucezijin'] = zhucezijin[0].get_text(strip=True)
            ret_dic['business']['states'] = states[0].get_text(strip=True)
            # '<label>：<value>' rows, keyed by the Chinese label
            for each in sub_soup.select('.list-text-info > li'):
                data = each.get_text(strip=True).split('：')
                ret_dic['business'][data[0]] = data[1]
        return ret_dic

 
def get_html(url):
    """Fetch *url* and return its body decoded as utf8, or None after
    REQ_RETYP failed attempts.

    Each attempt uses the module-level PROXY/TIMEOUT settings and a fresh
    header from get_header().  Non-200 status codes are treated as
    retryable failures (via assert, caught below).
    """
    backoff = 5
    for _ in range(REQ_RETYP):
        try:
            req = requests.get(url, timeout=TIMEOUT, headers=get_header(), proxies=PROXY)
            assert req.status_code == 200, req.status_code
            req.encoding = 'utf8'
            return req.text 
        except requests.exceptions.RequestException:
            # BUG FIX: network failures used to retry immediately with no
            # delay; now every failure path shares the growing back-off
            Loger.warning('http fail')
        except Exception as e:
            Loger.warning(e)
        time.sleep(backoff)
        backoff += 5
    Loger.error('%s time over'%url)
    return None
  

def update(): 
    """Refresh companies whose stored 'update' timestamp is older than
    UPDATE_DELAY days: re-fetch the page, upsert the current document and
    append a dated snapshot to 'company_history'.
    """
    url = 'http://www.itjuzi.com/company/%s'
    aw = arrow.now()
    # ids whose record is at least UPDATE_DELAY days old
    # NOTE(review): aw.replace(days=-N) shifts the date only on old arrow
    # versions (<0.15); newer arrow requires aw.shift(days=-N) — confirm
    # the pinned arrow version.
    old_ids = [each.get('_id') for each in DB['company'].find({'update':{'$lte':aw.replace(days=-UPDATE_DELAY).datetime}},{'_id':1})]
    for i in old_ids:
        Loger.debug('%s start update'%i)

        html = get_html(url%i)
        if not html:
            Loger.error('%s update fail'%i)
            continue

        # itjuzi's "page not found" placeholder page: remember the id in
        # 'temp' so it is not retried again within UPDATE_DELAY days
        if '找不到您访问的页面'in html:
            DB['temp'].update_one({'_id':i},{'$set':{'update':aw.floor('day').datetime}},upsert=True)
            Loger.error('%s update lost'%i)
            continue

        try:
            com = Company(html)
            # upsert the current snapshot keyed by the numeric company id
            com.sum_dic['_id'] = i
            com.sum_dic['update'] = aw.floor('day').datetime
            DB['company'].update_one({'_id':i},{'$set':com.sum_dic},upsert=True)

            # re-key the same dict as a dated copy for the history collection
            com.sum_dic['_id'] = '%s_%s'%(i,aw.floor('day').format('YYYYMMDD'))
            com.sum_dic['id'] = i
            DB['company_history'].insert_one(com.sum_dic)
            Loger.debug('%s update success'%i)
            time.sleep(NEXT_DAILY)
        except AssertionError as e:
            # the parser asserts on placeholder/empty pages -> mark as lost
            DB['temp'].update_one({'_id':i},{'$set':{'update':aw.floor('day').datetime}},upsert=True)
            Loger.error('%s crawl lost'%i)
        except Exception as e:
            Loger.exception(e)
            continue

def main():
    """Crawl company pages with ids 1..69999 that are neither already
    stored nor recently marked missing; store each parsed page in the
    'company' and 'company_history' collections.

    Stops early after a long run of consecutive missing pages, which
    indicates the highest existing company id has been passed.
    """
    aw = arrow.now()
    url = 'http://www.itjuzi.com/company/%s'
    # PERF FIX: these were lists, making the 70k-iteration membership
    # tests below quadratic; sets give O(1) lookups
    FINSH = {each.get('_id') for each in DB['company'].find({},{'_id':1})}
    max_id = max(FINSH) if FINSH else 0
    Loger.debug('%s max_id'%max_id)
    # ids recently confirmed missing — do not retry them yet
    # NOTE(review): aw.replace(days=-N) shifts only on old arrow (<0.15);
    # newer arrow requires aw.shift(days=-N) — confirm the pinned version.
    FAIL = {each.get('_id') for each in DB['temp'].find({'_id':{'$lte':max_id},'update':{'$gte':aw.replace(days=-UPDATE_DELAY).datetime}},{'_id':1})}
    serial_count = 0  # consecutive "page missing" counter
    for i in range(1,70000):
        if i in FINSH or i in FAIL:
            continue

        Loger.debug('%s start crawl'%i)
        html = get_html(url%i)
        if not html:
            Loger.error('%s crawl fail'%i)  
            continue

        # itjuzi's "page not found" placeholder page
        if '找不到您访问的页面'in html:
            DB['temp'].update_one({'_id':i},{'$set':{'update':aw.floor('day').datetime}},upsert=True)
            Loger.error('%s crawl lost'%i)
            serial_count += 1
            if serial_count>10:
                break
            continue

        try:
            com = Company(html)
            # upsert the current snapshot keyed by the numeric company id
            com.sum_dic['_id'] = i
            com.sum_dic['update'] = aw.floor('day').datetime
            DB['company'].update_one({'_id':i},{'$set':com.sum_dic},upsert=True)
            # re-key the same dict as a dated copy for the history collection
            com.sum_dic['_id'] = '%s_%s'%(i,aw.floor('day').format('YYYYMMDD'))
            com.sum_dic['id'] = i
            DB['company_history'].insert_one(com.sum_dic)
            Loger.debug('%s crawl success'%i)
            serial_count = 0
        except AssertionError as e:
            # the parser asserts on placeholder/empty pages -> mark as lost
            DB['temp'].update_one({'_id':i},{'$set':{'update':aw.floor('day').datetime}},upsert=True)
            Loger.error('%s crawl lost'%i)
            serial_count += 1
            if serial_count>50:
                break
        except Exception as e:
            Loger.exception(e)
            continue

        time.sleep(NEXT_DAILY)


class emptyPage(Exception):
    """Raised when a crawled page does not exist (placeholder/404 body)."""
    def __str__(self):
        # BUG FIX: message typo 'exixt' -> 'exist'
        return 'page not exist'

class MySoup(BeautifulSoup):
    '''
    Wrapper class: select/select_one that can fall back to a dummy empty
    <a> tag instead of None/[], so chained calls like .get_text() on a
    missing element do not raise AttributeError.
    '''
    def select(self, selector, _candidate_generator=None, limit=None, force=True):
        # force=True (the default) keeps stock BeautifulSoup behaviour:
        # return the (possibly empty) result list as-is
        value = super().select(selector,_candidate_generator,limit)
        if value or force:
            return value
        # force=False and no match: hand back a throwaway dummy tag
        # NOTE(review): this path returns a single Tag where select
        # normally returns a list — presumably only select_one below ever
        # passes force=False; verify against callers.
        return MySoup('<a><a></a></a>', 'xml').select_one('a > a')

    def select_one(self,selector,force=False):

        value = self.select(selector, limit=1)
        # NOTE(review): with force=True and an empty result this raises
        # IndexError on value[0] — possibly intended as "element must
        # exist", but the condition may have been meant as just
        # ``if value`` — confirm.
        if value  or force:
            return value[0]

        # default (force=False): a missing element yields an empty dummy
        # tag whose get_text() is ''
        return MySoup('<a><a></a></a>', 'xml').select_one('a > a')

class base:
    """Common scaffold for page parsers: holds the parsed soup tree and
    declares the hooks concrete parsers override."""

    def __init__(self, html):
        """Build a MySoup tree from the raw *html* text."""
        self.soup = MySoup(html, 'lxml')

    def assert_dic(self, dic):
        """Hook: validate/clean the parsed dict. No-op here."""
        pass

    def parse(self):
        """Hook: extract the page into a dict. No-op here."""
        pass

class Person(base):
    """Parser for an itjuzi.com person profile page."""

    def assert_dic(self, dic):
        """Drop falsy values from *dic* and return the cleaned copy."""
        dic = dict(filter(lambda x:x[1],dic.items()))
        return dic

    def parse(self):
        """Extract name, role, short experience tags, location,
        description, venture/work/education history into a dict, then
        clean it via assert_dic."""
        # BUG FIX: ret_dic was initialised twice and 'role' was assigned
        # twice with the identical expression; dead code removed.
        ret_dic = {}

        ## head info
        ret_dic['name'] = self.soup.select_one('.name').get_text(strip=True)
        ret_dic['role'] = self.soup.select_one('.tag').get_text(strip=True)

        ret_dic['short_exp'] = [tag.get_text(strip=True) for tag in self.soup.select('.titleset > span')]
        ret_dic['location'] = self.soup.select_one('.fa-map-marker').parent.get_text(strip=True)
        ret_dic['des'] = self.soup.select_one('head > meta[name=Description]').attrs.get('content')

        ## investment / venture experience
        ret_dic['verture_exp'] = []
        for tag in self.soup.select('.long > .right'):
            dic = {}
            dic['org_name'] = tag.select_one('.long').get_text(strip=True)
            dic['org_type'] = tag.select_one('.text > b').get_text(strip=True)
            dic['org_fundon'] = tag.select_one('.c-gray').get_text(strip=True)
            # the last text fragment of the block is the free-form description
            dic['org_desc'] = list(tag.select_one('.text').stripped_strings)[-1]

            ret_dic['verture_exp'].append(dic)

        # work / education history: locate each section by its <h2> heading
        # NOTE(review): find(..., text=...) is deprecated in bs4>=4.4 in
        # favour of string= — confirm the pinned bs4 version.
        ret_dic['work_exp'] = [each.get_text(strip=True) for each in self.soup.find('h2',text=re.compile('工作经历')).find_parent('div','sec').select('li')]
        ret_dic['edu_exp'] = [each.get_text(strip=True) for each in self.soup.find('h2',text=re.compile('教育经历')).find_parent('div','sec').select('li')]

        ret_dic = self.assert_dic(ret_dic) 
        return ret_dic

def test():
    """Manual smoke test: fetch one person page (authenticated via a
    hard-coded session cookie), cache the raw HTML to disk, then parse it
    with Person and pretty-print the result."""
    url = 'http://www.itjuzi.com/person/2'
    # NOTE(review): hard-coded, long-expired session cookie — replace with
    # a fresh one before running
    cookie = {'Cookie':'acw_tc=AQAAAABaaxPbfgIAkVEQcC6d2w3MCN9I; gr_user_id=b2f925ef-4994-4aee-bc7b-0a1395171968; _hp2_id.2147584538=%7B%22userId%22%3A%225837954218760983%22%2C%22pageviewId%22%3A%226402464240000847%22%2C%22sessionId%22%3A%225399898142871267%22%2C%22identity%22%3Anull%2C%22trackerVersion%22%3A%223.0%22%7D; acw_sc=595d91684bb40fb958bee4cc68da1d974e9921e1; identity=326737833%40qq.com; remember_code=1FQQAzGfLu; session=967213b1f06717d3b9ffc9db87c49f67b57c113e; _ga=GA1.2.1277010002.1490167436; _gid=GA1.2.689510366.1500605053; Hm_lvt_1c587ad486cdb6b962e94fc2002edf89=1498200405,1500027712,1500128389; Hm_lpvt_1c587ad486cdb6b962e94fc2002edf89=1500611307; gr_session_id_eee5a46c52000d401f969f4535bdaa78=c341a010-f85e-4a40-bfb9-8ea3cdcdbeb9'}
    req = requests.get(url,timeout=10,headers=get_header(),cookies=cookie)
    req.encoding = 'utf8'
    with open('test/person_1.html','wb')as f:
        f.write(req.content)
    # BUG FIX: the cached file holds UTF-8 bytes but was read back in text
    # mode with the platform-default encoding, which breaks on non-UTF-8
    # locales (e.g. Windows); read with an explicit encoding
    with open('test/person_1.html','rt',encoding='utf8')as f:
        html = f.read()
    person = Person(html)
    ret = person.parse()
    from pprint import pprint
    pprint(ret)

if __name__ == '__main__':
    # entry point: uncomment the run mode wanted — full crawl (main),
    # re-crawl of stale records (update), or the manual person-page test
    # main()
    # update()
    test()
