import pdb
import urllib
import urllib.parse

import requests
from bs4 import BeautifulSoup

class My_requests:
    """Thin wrapper around requests.Session with a naive retry-on-error loop."""

    # Number of attempts before giving up on a URL.
    MAX_RETRIES = 6

    def __init__(self):
        # One shared session so cookies/keep-alive persist across requests.
        self.session = requests.Session()

    def _get(self, url):
        """GET *url*, retrying up to MAX_RETRIES times.

        Raises the last exception if every attempt fails.  (The original
        fell through and returned None, which made callers crash later
        with an opaque AttributeError; raising fails loudly at the source.)
        """
        last_exc = None
        for _ in range(self.MAX_RETRIES):
            try:
                return self.session.get(url)
            except Exception as e:
                # Log and retry; network errors against this host are common.
                last_exc = e
                print(e)
                print(url)
        raise last_exc

    def get_json(self, url):
        """GET *url* and decode the response body as JSON."""
        req = self._get(url)
        return req.json()

    def get_text(self, url, encoding=None):
        """GET *url* and return the decoded text.

        If *encoding* is given it overrides requests' detected encoding
        before the body is decoded (the Sina pages are gb2312).
        """
        req = self._get(url)
        if encoding:
            req.encoding = encoding
        return req.text

class A_share:
    """Scrapers for Sina Finance A-share listings and executive data."""

    def get_all_symbol(self, pages=8):
        '''
        Return one dict per listed A-share: type / symbol / _id / name.

        purl: http://finance.sina.com.cn/data/#stock-schq-hsgs
        The API serves 500 rows per page; ~4000 listings => 8 pages.
        (The original loop stopped at page 7 and could miss the last page;
        an exhausted page just returns an empty 'items', so over-asking is
        harmless.)
        '''
        url = 'http://money.finance.sina.com.cn/d/api/openapi_proxy.php/?__s=[["hq","hs_a","",0,%s,500]]'

        mr = My_requests()
        ret_lis = []
        for page in range(1, pages + 1):
            jd = mr.get_json(url % page)
            # Empty/missing 'items' marks the end of the data.
            for item in jd[0]['items'] or []:
                ret_lis.append({
                    'type': item[0][0:2],   # exchange prefix, e.g. 'sh' / 'sz'
                    'symbol': item[1],
                    '_id': item[1],         # symbol doubles as the Mongo _id
                    'name': item[2],
                })
        return ret_lis

    def get_company_detail(self, symbol):
        '''
        Not implemented yet.

        url: http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/000776.phtml
        '''
        pass

    def get_company_executives(self, symbol):
        '''
        Scrape the executive roster table for *symbol*.

        url: http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpManager/stockid/000776.phtml
        Returns a list of dicts: name / url / _id / post / start / end / tag
        and, when a section header was seen, cate.
        '''
        url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpManager/stockid/%s.phtml'
        mr = My_requests()
        # The page is served in a legacy Chinese encoding.
        html = mr.get_text(url % symbol, encoding='gb2312')
        soup = BeautifulSoup(html, 'lxml')

        cate = None   # most recent <th> section header, applies to rows below it
        tag = None    # most recent single-cell grouping row
        ret_lis = []
        for tr in soup.select('#comInfo1 tr'):
            tds = tr.select('td')
            th = tr.select_one('th')
            if th:
                cate = th.get_text(strip=True)

            if len(tds) == 1:
                # A lone cell introduces a sub-group for the following rows.
                tag = tds[0].get_text(strip=True)
            elif len(tds) == 4:
                # Skip the column-header row ('姓 名' = "name").
                if tds[0].text != '姓 名':
                    name = tds[0].get_text(strip=True)
                    a = tds[0].find('a')
                    end = tds[3].text
                    dic = {
                        'name': name,
                        'url': a.get('href') if a else None,
                        '_id': '%s_%s' % (symbol, name),
                        'post': tds[1].text,
                        'start': tds[2].text,
                        'tag': tag,
                        # '--' means still in office; use a far-future sentinel.
                        'end': '2999-01-01' if end == '--' else end,
                    }
                    if cate:
                        dic['cate'] = cate
                    ret_lis.append(dic)
            else:
                # Unexpected row shape — surface it for debugging.
                print(len(tds))
        return ret_lis

    def get_company_executives_more(self, symbol):
        '''
        Follow each executive's detail page and scrape bio + position history.

        Returns (detail_list, roster_list); roster_list is the output of
        get_company_executives(symbol).
        '''
        def get_ex_urls(ex_lis, symbol):
            # Build the de-duplicated set of per-person detail URLs.
            urls = set()
            url = 'http://vip.stock.finance.sina.com.cn/corp/view/vCI_CorpManagerInfo.php?stockid=%s&Name=%s'
            for each in ex_lis:
                name = each.get('name')
                # The site expects the name percent-encoded in its legacy encoding.
                name = urllib.parse.quote_from_bytes(name.encode('gb2312'))
                urls.add(url % (symbol, name))
            return urls

        ex_lis = self.get_company_executives(symbol)
        urls = get_ex_urls(ex_lis, symbol)
        mr = My_requests()
        ret_lis = []
        for url in urls:
            html = mr.get_text(url, encoding='gb2312')
            soup = BeautifulSoup(html, 'lxml')
            post_lis = []
            person_dic = {}

            trs = soup.select('#Table1 tr')

            # Row 1: name / sex / birth / education / nationality cells.
            td = trs[1].select('td')
            person_dic['name'] = td[0].text
            person_dic['_id'] = '%s_%s' % (symbol, person_dic['name'])
            person_dic['sex'] = td[1].text
            person_dic['birth'] = td[2].text
            person_dic['edu'] = td[3].text
            person_dic['nation'] = td[4].text

            # Row 2, second cell: free-text biography.
            person_dic['des'] = trs[2].select('td')[1].get_text(strip=True)

            # Table3 holds the position history, 5 cells per data row.
            for tr in soup.select('#Table3 tr'):
                tds = tr.select('td')
                if len(tds) == 5:
                    reward = tds[4].get_text(strip=True)
                    post_lis.append({
                        'com_name': tds[0].text,
                        'post': tds[1].text,
                        'start': tds[2].text,
                        # '--' means still in office; use a far-future sentinel.
                        'end': '2999-01-01' if tds[3].text == '--' else tds[3].text,
                        'reward': reward if reward else None,
                    })
                else:
                    # Unexpected row shape — surface it for debugging.
                    print(tds)
            person_dic['post_his'] = post_lis
            ret_lis.append(person_dic)
        return ret_lis, ex_lis


def test():
    """Ad-hoc manual check: scrape one company's executives, then drop into pdb."""
    share = A_share()
    # share.get_all_symbol()  # full-listing fetch, left disabled
    result = share.get_company_executives_more('000776')
    print(result)
    pdb.set_trace()

def main():
    """Scrape every A-share symbol plus executive data and persist to MongoDB.

    Duplicate-key BulkWriteError is swallowed on every insert so the script
    can be re-run and simply skip documents inserted on a previous pass
    (each dict carries an explicit _id).
    """
    import pymongo
    DB = pymongo.MongoClient('192.168.1.220', 29001).sina

    a = A_share()
    symbols = a.get_all_symbol()

    try:
        DB.symbol.insert_many(symbols)
    except pymongo.errors.BulkWriteError:
        # duplicate _id: symbols already inserted on a previous run
        pass

    for each in symbols:
        ret_lis, ex_lis = a.get_company_executives_more(each['symbol'])

        try:
            DB.executive.insert_many(ret_lis)
        except pymongo.errors.BulkWriteError:
            # duplicate _id: already inserted
            pass

        try:
            DB.executives_detail.insert_many(ex_lis)
        except pymongo.errors.BulkWriteError:
            # duplicate _id: already inserted
            pass
        # NOTE(review): the original called TB_sy.insert_one(each) inside the
        # except block above, but TB_sy was never defined anywhere, so any
        # duplicate insert raised NameError.  Dropped until the intended
        # progress-tracking collection is known.

# Entry point: run the full scrape-and-store pipeline.
if __name__ == '__main__':
    main()