# -*- coding: utf-8 -*-
from wbhttp import WbHttp
import urllib.parse
from logger import logger
import re
import json
from bs4 import BeautifulSoup
from model import Timeline

class WeiboSearch(object):
    """Scrape the legacy s.weibo.com keyword-search pages and parse each post."""

    ## Search Weibo posts by keyword.
    def search_timelines_by_keyword(self, text, page):
        """Search Weibo for ``text`` and return the posts on result page ``page``.

        :param text: keyword to search for (URL-quoted before the request)
        :param page: result page number
        :return: list of Timeline objects; empty list on HTTP or parse failure
        """
        # NOTE(review): the legacy s.weibo.com endpoint takes '&page=' appended
        # directly to the path with no '?' — non-standard but apparently what the
        # STK pagelet pages expect; left as-is. TODO confirm against the site.
        url = "http://s.weibo.com/weibo/" + urllib.parse.quote_plus(text) + "&page=%d" % (page)
        resp = WbHttp.get(url, headers=None, require_login=False)
        timelines = []
        if resp.status_code == 200:
            try:
                # The result markup is embedded as JSON inside a JS call:
                #   STK.pageletM.view({"pid":"pl_weibo_direct", ..., "html": "..."})
                html = json.loads(
                    re.findall(r"STK.pageletM.view\(({\"pid\":\"pl_weibo_direct\".*)\)", resp.text)[0]
                )['html']
                soup = BeautifulSoup(html, "html.parser")
                # 'items', not 'list' — avoid shadowing the builtin.
                items = soup.find_all(attrs={"action-type": "feed_list_item"})
                timelines = [self.parse_weibo_html(item) for item in items]
                # Lazy %-args: formatting only happens if the record is emitted.
                logger.info('关键字%s搜索出了%d条结果', text, len(items))
            except (IndexError, KeyError, ValueError):
                # IndexError: pagelet JS call not found in the response body;
                # KeyError: payload lacks the 'html' field;
                # ValueError: covers json.JSONDecodeError on malformed JSON.
                logger.warning('关键字%s搜索错误', text)
        else:
            logger.warning('关键字%s搜索错误', text)
        return timelines

    ## Parse one feed item into a Timeline.
    def parse_weibo_html(self, content):
        """Extract the post fields from a single feed_list_item element.

        :param content: bs4 Tag carrying attribute action-type="feed_list_item"
        :return: Timeline(mid, text, imgs, username, time, link)
        :raises TypeError/KeyError: if an expected sub-element or attribute is missing
        """
        # Look the author anchor up once instead of twice.
        author = content.find("a", class_="name_txt")
        link = author['href']
        username = author['nick-name']
        text = content.find("p", class_="comment_txt").get_text()
        # 'date' attribute value — presumably an epoch-millisecond string; TODO confirm.
        time = content.find("a", class_="W_textb")['date']
        imgs = content.find_all("img", class_="bigcursor")
        mid = content['mid']
        timeline = Timeline(mid, text, imgs, username, time, link)
        return timeline
