# coding: utf8
import requests
import bs4
import json

class WXSogou:
    def __init__(self):
        self._session = requests.session()
        self._html = None
        self._bs = None

    @staticmethod
    def get_reading(dict_msg, key):
        if isinstance(dict_msg, dict):
            value = dict_msg.get(key, None)
            return None if not value else value.split(',')[0]

    @staticmethod
    def get_posting(dict_msg, key):
        if isinstance(dict_msg, dict):
            value = dict_msg.get(key, None)
            return None if not value else value.split(',')[1]

    @staticmethod
    def get_pure_value(descendants):
        r = ''
        for a in descendants:
            if not isinstance(a, bs4.element.Comment) \
               and isinstance(a, bs4.element.NavigableString):
                r += str(a)
        return r

    def parse_reading_and_posting(self):
        """
            获取月发文章数和平均阅读量
        """
        account_anti_url = 'http://weixin.sogou.com' \
                           + str(self._bs.find('div', attrs={'class': 'wrapper'})
                                 .find_all('script')[-1]).split('"')[-2]
        r = self._session.get(url=account_anti_url)
        if r.status_code == 200:
            r.encoding = 'utf8'
            json_code = json.loads(r.text)
            if json_code['code'] == 'success':
                return json_code.get('msg', None)
        return None

    def parse_data_of_html(self):
        """
            解析、提取搜狗公众号第一页的数据
        """
        msg  = self.parse_reading_and_posting()
        info = []
        for li in self._bs.find('ul', attrs={'class': 'news-list2'}).find_all('li'):

            acc_name     = self.get_pure_value(li.find('p', attrs={'class': 'tit'}).a.descendants)
            perm_post    = self.get_posting(msg, str(li['d']))
            aver_reading = self.get_reading(msg, str(li['d']))

            acc_number   = str(li.find('p', attrs={'class': 'info'}).label.contents[0])
            acc_img      = str(li.find('div', attrs={'class': 'img-box'}).img['src'])
            acc_qrcode   = str(li.find('div', attrs={'class': 'ew-pop'}).find_all('img')[-2]['src'])
            acc_url      = str(li.find('div', attrs={'class': 'img-box'}).a['href'])
            acc_intro    = self.get_pure_value(li.dl.dd.descendants)

            # 最近
            had_gone_to_posted = len(li.find_all('dl')) > 1
            tag_dd       = li.find_all('dd')
            art_brief    = None if not had_gone_to_posted else self.get_pure_value(tag_dd[-1].a.descendants)
            art_url      = None if not had_gone_to_posted else str(tag_dd[-1].a['href'])
            art_time     = None if not had_gone_to_posted else int(tag_dd[-1].span.script.contents[0].split('\'')[-2])

            info.append({
                'acc_name'    : acc_name,
                'perm_post'   : perm_post,
                'aver_reading': aver_reading,
                'acc_number'  : acc_number,
                'acc_img'     : acc_img,
                'acc_qrcode'  : acc_qrcode,
                'acc_url'     : acc_url,
                'acc_intro'   : acc_intro,
                'art_brief'   : art_brief,
                'art_url'     : art_url,
                'art_time'    : art_time
            })
        return info

    def search(self, account):
            url = 'http://weixin.sogou.com/weixin' \
                  '?type=1' \
                  '&s_from=input' \
                  '&query={account}' \
                  '&ie=utf8' \
                  '&_sug_=n' \
                  '&_sug_type_='.format(account=account)

            r = self._session.get(url=url)
            if r.status_code == 200:
                r.encoding = 'utf8'
                self._html = r.text
                self._bs = bs4.BeautifulSoup(markup=self._html, features='html.parser')
                info = self.parse_data_of_html()
                return info


if __name__ == '__main__':
    # Demo run: query Sogou for movie-related official accounts and dump
    # the parsed first page of results.
    crawler = WXSogou()
    results = crawler.search('电影公众号')
    print(results)