#coding:utf-8

from bs4 import BeautifulSoup
import re, json
from datetime import datetime
from Dao import SeedFollowee


def parse_pid(html):
    """
    Extract the Sina $CONFIG['page_id'] value from a profile page.

    :param html: raw html of a weibo user page
    :return: page_id as a plain str, or None when the config script is
             missing / malformed or holds no page_id entry
    """
    soup = BeautifulSoup(html)
    # page config is inlined in a <script> as $CONFIG['key']='value'; pairs
    script = soup.find('script', text=re.compile(r"\$CONFIG\['page_id'\]"))

    try:
        for attr in script.text.split(';'):
            if 'page_id' in attr:
                # attr looks like "$CONFIG['page_id']='100505123...'";
                # take the quoted value and strip the surrounding quotes
                pid = attr.split('=')[1][1:-1]
                return str(pid)  # convert unicode to string
        return None  # config script present but no page_id attribute
    except Exception as e:
        # script is None (pattern not found) or unexpected markup;
        # print(e) instead of e.message: not every exception defines .message
        print(e)
        return None


def parse_uid(html):
    """
    Parse the uid of this html.

    :param html: raw html of a weibo user page
    :return: uid as a plain str, -1 when the config script holds no uid
             entry, or None when the script is missing / malformed
    """
    soup = BeautifulSoup(html)
    # the same config script that carries page_id also carries uid
    script = soup.find('script', text=re.compile(r"\$CONFIG\['page_id'\]"))

    try:
        for attr in script.text.split(';'):
            if 'uid' in attr:
                uid = attr.split('=')[1][1:-1]
                return str(uid)  # convert unicode to string
        return -1  # no uid
    except Exception as e:
        # script is None -> AttributeError; e.message is unreliable
        print(e)
        return None


def is_exceptional(html):
    """Return True when the page reports the account is in an abnormal state."""
    return u' 您当前使用的账号存在异常，请完成以下操作解除异常状态' in BeautifulSoup(html).text


def is_frozen(html):
    """
    Check whether the account behind this page has been frozen.

    :param html: raw html
    :return: True if the <title> shows the unfreeze page, otherwise False
             (also False when the page has no <title> at all)
    """
    soup = BeautifulSoup(html)
    try:
        return u'微博帐号解冻' in soup.find('title').text
    except Exception as e:
        # soup.find('title') returned None -> AttributeError;
        # print(e) instead of e.message, which many exceptions lack
        print(e)
        return False


def is_visitor(html):
    """
    Detect the visitor / passport redirect page.

    :param html:
    :return: True if this account is in visitor status
    """
    title = BeautifulSoup(html).find('title').text
    return u'Sina Visitor System' in title or u'新浪通行证' in title


def convert_script_to_html(script):
    """
    Unwrap the html payload carried by an FM.view(...) script tag.

    :param script: a bs4 tag object whose text looks like FM.view({...json...})
    :return: the embedded html string, or None when the payload is not
             valid json or has no 'html' key
    """
    text = script.text
    try:
        jsn = text[8:-1]  # strip the leading "FM.view(" and the trailing ")"
        return json.loads(jsn)['html']
    except Exception as e:
        # bad json / missing 'html' key; e.message is unreliable, print e itself
        print(e)
        return None


def get_max_page_num(links):
    """
    Find the largest page number among pagination links.

    :param links: a list of links, which contain page numbers
    :return: max page number (0 when no link text is numeric)
    """
    numbers = [int(link.text) for link in links if link.text.isdigit()]
    return max(numbers) if numbers else 0


def parse_followee_page_num(html):
    """
    Work out how many followee pages a user has.

    :param html:
    :return: followee page number (0 if the user follows nobody),
             None if dirty html
    """
    soup = BeautifulSoup(html)

    # 'follow_item S_line2' denotes one followee entry
    script = next(
        (scr for scr in soup.find_all('script')
         if 'follow_item S_line2' in scr.text),
        None)

    if script is None:
        return 0  # user follows nobody

    inner = convert_script_to_html(script)
    if inner is None:
        return None  # dirty html

    pager = BeautifulSoup(inner).find('div', 'W_pages')
    if pager is None:
        return 1  # followees exist but fit on a single page
    return get_max_page_num(pager.find_all('a', attrs={'bpfilter': 'page'}))


def is_enterprise(followee):
    """
    Decide whether a followee entry is an organization account.

    :param followee: a <li> tag for one followee
    :return: True for an organization-verified account, False for a
             personal one (or no verification icon), None on bad markup
    """
    try:
        dd = followee.find('dd', 'mod_info S_line1')
        icons = dd.find('div', 'info_name W_fb W_f14')
        for tp in icons.find_all('i'):
            try:
                title = tp['title']
            except KeyError:
                continue  # icon without a title attribute
            if u'微博个人认证' in title:
                return False
            elif u'微博机构认证' in title:
                return True
        return False  # no verification icon at all
    except Exception as e:
        # dd / icons is None -> AttributeError on broken markup;
        # print(e) instead of the unreliable e.message
        print(e)
        return None


def parse_followee_uid(followee):
    """
    Pull the uid out of a followee's action-data attribute.

    :param followee: a li tag carrying an action-data attribute of the
                     form "key=value&key=value&..."
    :return: the uid value as found, or None when the attribute is
             missing or holds no uid field
    """
    try:
        pairs = followee['action-data'].split('&')
        for pair in pairs:
            if u'uid' in pair:
                return pair.split('=')[-1]
        return None
    except Exception as e:
        # no action-data attribute -> KeyError; e.message is unreliable
        print(e)
        return None


def parse_followee_name(followee):
    """
    Pull the nickname (fnick) out of a followee's action-data attribute.

    :param followee: a li tag carrying an action-data attribute of the
                     form "key=value&key=value&..."
    :return: the fnick value as found, or None when the attribute is
             missing or holds no fnick field
    """
    try:
        pairs = followee['action-data'].split('&')
        for pair in pairs:
            if u'fnick' in pair:
                return pair.split('=')[-1]
        return None
    except Exception as e:
        # no action-data attribute -> KeyError; e.message is unreliable
        print(e)
        return None


def parse_followees(html, pid):
    """
    Parse one followee page into SeedFollowee records.

    :param html: raw html of a followee page
    :param pid: page_id of the follower; suid is pid without its
                6-character type prefix
    :return: a list of followee objects (possibly empty),
             None if dirty html
    """
    soup = BeautifulSoup(html)
    script = None
    for scr in soup.find_all('script'):
        if 'follow_item S_line2' in scr.text:  # follow_item S_line2 denotes for one follower
            script = scr
            break
    if script is None:
        return []  # no followees

    html = convert_script_to_html(script)
    if html is None:
        return None  # dirty html

    soup = BeautifulSoup(html)

    followee_list = []
    for flist in soup.find_all('ul', 'follow_list'):
        # maybe there are two follow lists: the common one and the
        # recommendation one
        followee_list.extend(flist.find_all('li', 'follow_item S_line2'))

    followees = []  # to return
    for fee in followee_list:  # start to parse...
        if is_enterprise(fee) is True:
            continue  # ignore enterprises
        uid = parse_followee_uid(fee)
        # guard against None: calling .isdigit() on None raised before
        if uid is None or uid.isdigit() is False:
            continue  # ignore topics / entries without a numeric uid
        # build a fresh dict per entry: reusing one shared mutable dict
        # would make every SeedFollowee alias the same data
        followees.append(SeedFollowee({
            'suid': pid[6:],
            'uid': uid,
            'nick_name': parse_followee_name(fee),
            'timestamp': datetime.now(),
        }))

    return followees


def parse_origin_mid(timeline):
    """
    Read the mid (message id) attribute of a timeline entry.

    :param timeline: a tag carrying a 'mid' attribute
    :return: the mid value, or None when the attribute is missing
    """
    try:
        return timeline['mid']
    except Exception as e:
        # missing attribute -> KeyError; e.message is unreliable, print e
        print(e)
        return None


def parse_origin_time(timeline):
    """
    Read the creation time of a timeline entry.

    :param timeline: a timeline tag containing an <a class="S_link2 WB_time">
    :return: the link's title attribute (the full timestamp), or None
             when the link is absent or has no title
    """
    created_time = timeline.find('a', 'S_link2 WB_time')
    try:
        return created_time['title']
    except Exception as e:
        # created_time is None, or the tag lacks a title attribute;
        # print(e) instead of the unreliable e.message
        print(e)
        return None


def parse_latest_timeline(uid, html):
    """
    Extract the newest original timeline entry of a user.

    :param uid:
    :param html:
    :return: result_user or None if no timeline
    """
    soup = BeautifulSoup(html)
    # the empty space after WB_feed_type SW_fun S_line2 is a trick;
    # maybe some problems would come up if Sina modifies his strategy
    timeline = soup.find('div', 'WB_feed_type SW_fun S_line2 ')
    if timeline is None:
        return None

    return {
        'uid': uid,
        'latest_tmln_mid': parse_origin_mid(timeline),
        'latest_tmln_time': parse_origin_time(timeline),
        'timestamp': datetime.now(),
    }


def parse_exceptional_user(html):
    """
    Tell whether the page reports the visited account as abnormal (10008).

    :param html: raw utf-8 encoded html bytes
    :return: True when the 10008 notice is present
    """
    return u'抱歉，您当前访问的帐号异常，暂时无法访问。(10008)' in html.decode('utf-8')


