from bs4 import BeautifulSoup
import requests
from config import get_header,get_log
import re
from pprint import pprint


class emptyPage(Exception):
    """Raised when a scraped page has no usable content (e.g. blank name)."""

    def __str__(self):
        # BUGFIX: corrected misspelled message 'page not exixt' -> 'page not exist'.
        return 'page not exist'

class MySoup(BeautifulSoup):
    '''
    Wrapper class around BeautifulSoup whose selectors can fall back to a
    dummy empty <a> element instead of None, so call chains such as
    ``soup.select_one(sel).get_text()`` do not raise AttributeError on a miss.
    '''

    @staticmethod
    def _dummy_tag():
        # Build a throwaway soup and return its inner empty <a> element,
        # used as the "nothing matched" placeholder.
        return MySoup('<a><a></a></a>', 'xml').select_one('a > a')

    def select(self, selector, _candidate_generator=None, limit=None, force=True):
        """CSS-select all matches.

        With force=True (the default) the result list is returned as-is,
        even when empty.  With force=False an empty result is replaced by a
        one-element list holding a dummy <a> tag.

        BUGFIX: the original returned the bare dummy element on the
        force=False fallback path, breaking select()'s list contract; it is
        now wrapped in a list.
        """
        value = super().select(selector, _candidate_generator, limit)
        if value or force:
            return value
        return [self._dummy_tag()]

    def select_one(self, selector, force=False):
        """CSS-select the first match.

        With force=False (the default) a miss yields a dummy empty <a>
        element rather than None.  NOTE(review): with force=True a miss
        raises IndexError (``value[0]`` on an empty list) — behavior kept
        from the original; confirm whether that strict mode is intended.
        """
        value = self.select(selector, limit=1)
        if value or force:
            return value[0]
        return self._dummy_tag()

class base:
    """Common scaffold for page parsers: holds the parsed soup and defines
    the two hooks (parse / assert_dic) that concrete parsers override."""

    def __init__(self, html):
        # Parse the raw HTML once up front; subclasses work off self.soup.
        self.soup = MySoup(html, 'lxml')

    def parse(self):
        """Extract a result dict from self.soup (overridden by subclasses)."""
        pass

    def assert_dic(self, dic):
        """Validate/normalize a parsed dict (overridden by subclasses)."""
        pass

class itjuzi(base):
    """Parser for itjuzi.com company pages.

    NOTE(review): most of the extraction logic in parse() is currently
    commented out (work in progress); as written, parse() always fails
    assert_dic() because ret_dic stays empty.
    """

    def assert_dic(self, dic):
        """Drop falsy-valued entries from *dic*, then assert the required
        keys ('des', 'category', 'locate', 'name', 'tagset') are present.

        NOTE(review): `assert`-based validation is stripped under
        ``python -O``; consider raising an explicit exception instead.
        """
        # Keep only entries whose value is truthy.
        dic = dict(filter(lambda x:x[1],dic.items()))

        # assert name.replace('\t',''),'No coms'
        assert 'des' in dic
        assert 'category' in dic
        assert 'locate' in dic
        assert 'name' in dic
        assert 'tagset' in dic
        return dic

    def parse(self):
        """Build a dict of company attributes from the page soup.

        NOTE(review): everything below except the name lookup is commented
        out, so ret_dic reaches assert_dic() empty and an AssertionError is
        raised on every call.
        """
        ret_dic = {}

        ## head info

        # NOTE(review): `name` is computed but never used, and ret_dic is
        # immediately re-initialized just below.
        name = self.soup.select('span > h1')

        ret_dic = {}

        # name = self.soup.select_one('h1').get_text().strip().split('\n')[0]
        # round_ =  self.soup.select_one('h1 > span').get_text(strip=True)
        # if not name.replace('\t',''): raise emptyPage()
        # ret_dic['name']=name.replace('\t','')
        # ret_dic['round']=round_.strip('()\t ')
        # ret_dic['slogan'] = self.soup.select('.seo-slogan')[0].get_text(strip=True)
        # ret_dic['category'] = [each.get_text() for each in self.soup.select('.scope > a') ]
        # ret_dic['locate'] = [each.get_text() for each in self.soup.select('.loca > a') ]
        # ret_dic['tagset'] = [each.get_text() for each in self.soup.select('.c-gray-aset span') ]
        # ret_dic['weibo'] = self.soup.select_one('a > .icon-weibo').parent.attrs.get('href')
        # ret_dic['weblink'] = self.soup.select_one('.douniwan').attrs.get('href')

        # # basic info
        # ret_dic['des'] = self.soup.select_one('head > meta[name=Description]').attrs.get('content')


        # for each in self.soup.select('.seo-second-title'):
        #     lis = each.get_text(strip=True).split('：') 
        #     ret_dic[lis[0]] = lis[1]
        # ret_dic['states'] = self.soup.select_one('.des-more .tag').get_text(strip=True)

        # fundinground = []
        # for tds in self.soup.select('.list-round-v2 tr'):
        #     date = tds.select_one('.date').get_text(strip=True)
        #     rounds = tds.select_one('.round').get_text(strip=True)
        #     finades = tds.select_one('.finades').get_text(strip=True)
        #     investors = [a.get_text(strip=True) for a in tds.select('td > a, td:nth-of-type(4) > span')]
        #     fundinground.append({'date':date,'rounds':rounds,'finades':finades,'investors':investors})
        # ret_dic['fundinground']=fundinground

        # ret_dic['merge'] = self.soup.select_one('.invst-data b').get_text(strip=True)

        # teams = []
        # for tds in self.soup.select('.institu-member li > div'):
        #     pid = tds.select_one('.person-name > a.title').attrs.get('href')
        #     name = tds.select_one('b > .c').get_text(strip=True)
        #     position = tds.select_one('b > .c-gray').get_text(strip=True)
        #     des = tds.select_one('.person-des').get_text(strip=True)
        #     weibo = tds.select_one('.flr > a').attrs.get('href') if tds.select_one('.flr > a') else None
        #     teams.append({'pid':pid,'name':name,'position':position,'des':des,'weibo':weibo})
        # ret_dic['team']=teams

        # # product
        # product = []
        # for tds in self.soup.select('.list-prod .on-edit-hide'):
        #     tag = tds.select_one('span.tag').get_text(strip=True)
        #     name = tds.select_one('h4 > b').get_text(strip=True)
        #     des = tds.select_one('p').get_text(strip=True)
        #     product.append({'tag':tag,'name':name,'des':des,})
        # ret_dic['product']=product


        # # business
        # bs_name = self.soup.select_one('.essential th').get_text(strip=True)
        # if bs_name:
        #     ret_dic['business'] = {}
        #     ret_dic['business']['name'] = bs_name
        #     for each in self.soup.select('.essential td'):
        #         try:
        #             ret_dic['business'][each.select_one('.tab_title').get_text(strip=True)] = each.select_one('.tab_main').get_text(strip=True)
        #         except:pass

        # Drop falsy values and check required keys before returning.
        ret_dic = self.assert_dic(ret_dic) 
        return ret_dic

def test():
    """Fetch one itjuzi page, cache it to disk, then parse the cached copy.

    Ad-hoc smoke test: performs a live HTTP request with a hardcoded
    session cookie, so it needs network access and a valid login to work.
    """
    url = 'http://www.itjuzi.com/person/1'
    cookie = {'Cookie':'acw_tc=AQAAAABaaxPbfgIAkVEQcC6d2w3MCN9I; gr_user_id=b2f925ef-4994-4aee-bc7b-0a1395171968; _hp2_id.2147584538=%7B%22userId%22%3A%225837954218760983%22%2C%22pageviewId%22%3A%226402464240000847%22%2C%22sessionId%22%3A%225399898142871267%22%2C%22identity%22%3Anull%2C%22trackerVersion%22%3A%223.0%22%7D; acw_sc=595d91684bb40fb958bee4cc68da1d974e9921e1; session=ad6e5eaaffcb91949e4189070fc634f9dff723ea; _gat=1; identity=326737833%40qq.com; remember_code=1FQQAzGfLu; Hm_lvt_1c587ad486cdb6b962e94fc2002edf89=1498200405,1500027712,1500128389; Hm_lpvt_1c587ad486cdb6b962e94fc2002edf89=1500133056; _ga=GA1.2.1277010002.1490167436; _gid=GA1.2.1637416395.1500027572; gr_session_id_eee5a46c52000d401f969f4535bdaa78=c2d75c14-28dc-494f-bde6-cc7c5cb9d00d'}
    req = requests.get(url, timeout=10, headers=get_header(), cookies=cookie)
    req.encoding = 'utf8'

    # Cache the raw response bytes so the parser can be re-run offline.
    cache_path = '../test/4.html'
    with open(cache_path, 'wb') as f:
        f.write(req.content)
    # BUGFIX: the original read back '../test/.html' — a different file from
    # the one just written. Read the same cache file, decoded explicitly as
    # utf8 to match req.encoding above.
    with open(cache_path, 'rt', encoding='utf8') as f:
        html = f.read()

    jz = itjuzi(html)
    ret = jz.parse()

# Run the ad-hoc scrape-and-parse smoke test when executed as a script.
if __name__ == '__main__':
    test()