r"""Core functionality for sina crawler.
"""

import requests
import traceback
from time import time
from math import floor
from random import random
from json import loads
from re import match
from bs4 import BeautifulSoup
import crawler.utils


__DEBUG__ = True 

class SinaCrawler:
    """
    Base class for sina crawlers. All crawler implementations should extend
    this class. For now, we only support a sequential crawler; we might
    implement a parallel crawler later.
    """
    def __init__(self):
        self.crawlerType = 'BaseCrawler'
        self.session = requests.Session()
        # NOTE(review): this shares the ConstantsPool.HEADERS dict object
        # across all instances; mutating session headers would affect every
        # crawler built afterwards.
        self.session.headers = ConstantsPool.HEADERS
        self.is_login = False
        # Per-domain cookie jars populated by doLogin().
        self.cookies = dict()

    def doLogin(self):
        """
        Perform the mobile-weibo login handshake:

        1. fetch the login page so the server plants its initial cookies,
        2. hit the pre-login endpoint,
        3. POST the credentials to the SSO login endpoint,
        4. follow the returned cross-domain URLs to collect cookies for
           sina.com.cn, weibo.cn and weibo.com.

        On success, self.is_login is True and self.cookies holds one cookie
        jar per domain.
        """
        # Adjacent string literals are used below instead of backslash
        # continuation *inside* a literal, which used to embed the next
        # line's indentation (literal spaces) into the header value.
        # Header values are strings because requests rejects int values.
        ret = self.session.get('https://passport.weibo.cn/signin/login',
                headers = {'accept': 'text/html, application/xhtml+xml, '
                                'application/xml;q=0.9,image/webp,*/*;q=0.8',
                        'accept-encoding': 'gzip, deflate, sdch, br',
                        'host': 'passport.weibo.cn',
                        'upgrade-insecure-requests': '1'})
        # Pre-login step: ask the SSO server for login parameters.
        ret = self.session.get(
                ConstantsPool.PRE_LOGIN +
                '&callback=' + self._siteId('jsonpcallback'),
                headers = {'accept': '*/*',
                        'accept-encoding': 'gzip, deflate, sdch, br',
                        'host': 'login.sina.com.cn',
                        'referer': 'https://passport.weibo.cn/signin/login'})
        if __DEBUG__:
            crawler.utils.printResponse(ret)
        # SECURITY NOTE: credentials are hard coded here; move them to a
        # config file or environment variables before publishing this code.
        ret = self.session.post(ConstantsPool.LOGIN,
                data = {'username': 'anglenet@sina.cn',
                        'password': 'passwardme1',
                        'savestate': 1,
                        'ec': 0,
                        'entry': 'mweibo'},
                headers = {'accept': '*/*',
                        'accept-encoding': 'gzip, deflate, br',
                        'content-type': 'application/x-www-form-urlencoded',
                        'host': 'passport.weibo.cn',
                        'origin': 'https://passport.weibo.cn',
                        'referer': 'https://passport.weibo.cn/signin/login'})
        if __DEBUG__:
            # Json response
            crawler.utils.printResponse(ret, True)
        self.session.cookies = ret.cookies
        # The login response lists one URL per domain we need access to:
        #   sina.com.cn, weibo.cn, weibo.com
        crossdomainlist = ret.json()['data']['crossdomainlist']
        self._crossDomainLogin(crossdomainlist['sina.com.cn'],
                'sina.com.cn', 'login.sina.com.cn')
        self._crossDomainLogin(crossdomainlist['weibo.cn'],
                'weibo.cn', 'passport.weibo.cn')
        self._crossDomainLogin(crossdomainlist['weibo.com'],
                'weibo.com', 'passport.weibo.com')
        # For now, we shall have full accesses.

    def _crossDomainLogin(self, url_fragment, domain, host):
        """
        Follow one protocol-relative cross-domain login URL and, if the
        server confirms the login, store the returned cookies under *domain*.

        url_fragment -- protocol-relative URL from the 'crossdomainlist'
        domain       -- key to store the resulting cookie jar under
        host         -- value for the 'host' request header
        """
        ret = self.session.get('https:' + url_fragment +
                '&savestate=1&callback=' + self._siteId('jsonpcallback'),
                headers = {'accept': '*/*',
                        'accept-encoding': 'gzip, deflate, sdch, br',
                        'host': host,
                        # NOTE(review): 'refer' looks like a typo for
                        # 'referer'; kept exactly as captured in case the
                        # server keys on the literal header name.
                        'refer': 'https://passport.weibo.cn/signin/login'})
        if __DEBUG__:
            crawler.utils.printResponse(ret)
        if self._isLogined(ret):
            self.cookies[domain] = ret.cookies

    def getWeiboCom(self):
        """Visit the weibo.com home page and refresh its stored cookies."""
        # .get() avoids a KeyError when cross-domain login never stored
        # cookies for this domain.
        if self.is_login and self.cookies.get('weibo.com'):
            ret = self.session.get(ConstantsPool.SINA_COM,
                    headers = {
                            }, cookies = self.cookies['weibo.com'])
            if __DEBUG__:
                crawler.utils.printResponse(ret)
            self.cookies['weibo.com'].update(ret.cookies)

    def getIndexHotNew(self):
        """
        Fetch the 'index hot new' discovery page.

        Returns the response object, or None when not logged in.
        """
        if self.is_login and self.cookies.get('weibo.com'):
            ret = self.session.get(ConstantsPool.INDEX_HOT_NEW,
                    headers = {'accept': 'text/html,application/xhtml+xml,'
                            'application/xml;q=0.9,image/webp,*/*;q=0.8',
                            'accept-encoding': 'gzip, deflate, sdch',
                            'host': 'd.weibo.com',
                            'referer': 'http://weibo.com/u/5738521971/home?wvr=5',
                            'upgrade-insecure-requests': '1'},
                    cookies = self.cookies['weibo.com'])
            if __DEBUG__:
                crawler.utils.printResponse(ret)
            return ret

    def tinyTopicRankCrawler(self, link):
        """
        Crawl a single trending-topic page.

        Returns a dictionary of all trending topics on that page, or None
        on network error.
        """
        response = self.reliableHttpGet(link,
                    headers = {'accept': 'text/html,application/xhtml+xml,'
                            'application/xml;q=0.9,image/webp,*/*;q=0.8',
                            'accept-encoding': 'gzip, deflate, sdch',
                            'host': 'd.weibo.com',
                            'upgrade-insecure-requests': '1'})
        if response:
            return TrendingTopicParser.getTrendingTopics(response.text, self, True)
        else:
            print('Network Error has detected.')
            return None

    def trtopicCrawler(self, tr_link_prefix, rank_type, tr_type, has_rank, city_code = 0):
        """
        Crawl every page of a trending-topic ranking and return all topics.

        tr_link_prefix -- path fragment, see ConstantsPool.trtopic_link_prefix
        rank_type      -- numeric id of the ranking widget on the page
        tr_type        -- 0: normal topics, 1: hot topics, 2: local topics
        has_rank       -- whether the page shows explicit rank numbers
        city_code      -- city id from ConstantsPool.city_codes (tr_type 2)

        Before the crawler starts to crawl data, it should have logined
        successfully. We do not check it again.
        """
        trtopics = dict()
        page_num = 1
        while True:
            rank = str(rank_type)
            if tr_type == 0:
                # Normal topic
                link = ('http://d.weibo.com' + tr_link_prefix +
                        '?pids=Pl_Discover_Pt6Rank__' + rank +
                        '&cfs=920&Pl_Discover_Pt6Rank__' + rank +
                        '_filter=&Pl_Discover_Pt6Rank__' + rank +
                        '_page=' + str(page_num) + '#Pl_Discover_Pt6Rank__' +
                        rank)
            elif tr_type == 1:
                # Hot topic
                link = ('http://d.weibo.com' + tr_link_prefix +
                        '?pids=Pl_Discover_Pt6Rank__' + rank +
                        '&cfs=920&Pl_Discover_Pt6Rank__' + rank +
                        '_filter=hothtlist_type=1&Pl_Discover_Pt6Rank__' +
                        rank + '_page=' + str(page_num) +
                        '#Pl_Discover_Pt6Rank__' + rank)
            elif tr_type == 2:
                # Local topic.  BUG FIX: city_code may be an int (its
                # default is 0), so it must be converted before the string
                # concatenation -- the original raised TypeError here.
                link = ('http://d.weibo.com' + tr_link_prefix +
                        '?from=faxian&Pl_Discover_Pt6Rank__5_filter=cityid=' +
                        str(city_code))
            else:
                print("Wrong tr_type!")
                return dict()
            headers = {'accept': 'text/html,application/xhtml+xml,'
                    'application/xml;q=0.9,image/webp,*/*;q=0.8',
                    'accept-encoding': 'gzip, deflate, sdch',
                    'upgrade-insecure-requests': '1',
                    'host': 'd.weibo.com'}
            response = self.reliableHttpGet(link, headers)
            if __DEBUG__:
                print(response)
            if response:
                response_tr = TrendingTopicParser.getTrendingTopics(
                        response.text, self, has_rank)
                if __DEBUG__:
                    print(response_tr)
                # An empty parse result means we ran past the last page:
                # we should have all related trending topics now.
                if not response_tr:
                    return trtopics
                trtopics.update(response_tr)
            else:
                # BUG FIX: the original looped forever when the network
                # stayed down; give back what we have instead.
                print('Network Error has detected.')
                return trtopics
            page_num += 1

    @crawler.utils.WatchRequest
    def reliableHttpGet(self, url, headers, max_tries = 4):
        """
        GET *url*, retrying up to *max_tries* times on network errors.

        Returns the response object, or None when every attempt failed.
        """
        for _ in range(max_tries):
            try:
                return self.session.get(url, headers = headers,
                        cookies = self.cookies.get('weibo.com'))
            except requests.RequestException:
                traceback.print_exc()
        return None

    def closeSession(self):
        """Release the underlying requests session."""
        self.session.close()

    def _isLogined(self, ret):
        """
        Parse the JSON blob embedded in a (possibly JSONP-wrapped) login
        response and return True when its retcode reports success; also
        flips self.is_login on success.
        """
        ret_data = match(r'.*(\{.*\}){1}.*', ret.text)
        if not ret_data:
            return False
        ret_data = loads(ret_data.group(1))
        if ret_data['retcode'] == 20000000:
            self.is_login = True
            return True
        print('Login for ' + ret.request.headers['host'] + ' failed, and '
                'return message is: ' + ret_data['msg'])
        return False

    def _siteId(self, prefix):
        """
        Return *prefix* followed by a pseudo-unique id built from the
        current millisecond timestamp plus a random component, mimicking
        the callback ids the official web client generates.
        """
        site_id = int(round(time() * 1000) +
                floor(random() * 100000))
        # Works for an empty prefix too, so no branching is needed.
        return prefix + str(site_id)

    def __str__(self):
        return self.crawlerType

class TrendingTopicParser:
    """
    A simple parser for parsing trending topic information from html tags.

    NOTE(review): getTrendingTopics takes no ``self`` and is always invoked
    through the class; adding ``@staticmethod`` would make that explicit.
    """
    def getTrendingTopics(html_doc, spider, has_rank=False):
        """
        Return a dictionary of all trending topics contained in that tags.

        html_doc -- either a full HTML page or a script/JSONP payload that
                    embeds a JSON object with an 'html' field
        spider   -- crawler instance used to fetch each topic's detail page
                    (must provide reliableHttpGet)
        has_rank -- when True, parse each topic's explicit rank number

        Returns a dict keyed by topic title, or None when the payload
        carries no 'html' field.
        """
        # If this is a full HTML page, the interesting payload lives in the
        # last <script> tag; otherwise html_doc is already the payload.
        if BeautifulSoup(html_doc, 'lxml').html:
            html_doc = str(BeautifulSoup(html_doc, 'lxml').find_all('script')[-1])
        # Extract the outermost {...} JSON object embedded in the script
        # text and decode it.
        html_doc = loads(match(r'.*(\{.*\}){1}.*',
                html_doc).group(1))
        print(html_doc)
        if 'html' not in html_doc:
            return None
        # The 'html' field is an escaped HTML fragment: drop whitespace
        # control characters and backslash escapes before re-parsing it.
        html_doc = html_doc['html'].replace('\t', '').replace('\n', '').replace('\r', '').replace('\\', '')
        bs = BeautifulSoup(html_doc,'lxml')
        trtopics = dict()
        # NOTE(review): all of the .div/.next_sibling chains below encode
        # the exact page layout captured at the time of writing; any layout
        # change on weibo's side will break them.
        trtopic_lines = bs.find_all('ul')[1]
        for child in trtopic_lines.find_all('li'):
            tr = TrendingTopic()
            # Find a trending topic block
            title_box = child.div.div.next_sibling.div.div
            subinfo_box = child.div.div.next_sibling.div.next_sibling.div
            if has_rank:
                # The rank label carries a textual prefix before the number.
                tr.rank = int(title_box.span.contents[0][3:])
            tr.topic_link = title_box.a['href']
            tr.topic = title_box.a.contents
            subinfo_box = subinfo_box.next_sibling.next_sibling
            tr.topic_presenter = subinfo_box.span.span.a.contents[0]
            tr.topic_presenter_link = subinfo_box.span.span.a['href']
            # Fetch the topic's own page for the detailed numbers.
            trtopic_page =  spider.reliableHttpGet(tr.topic_link,
                    headers = {'accept': 'text/html,application/xhtml+xml,\
                            application/xml;q=0.9,image/webp,*/*;q=0.8',
                            'accept-encoding': 'gzip, deflate, sdch',
                            'host': 'd.weibo.com',
                            'upgrade-insecure-requests': 1})
            print('topic_link:' + tr.topic_link)
            if trtopic_page:
                print(trtopic_page.text)
                # Bitmask of widgets found so far: bit0 = intro box,
                # bit1 = read/discussion/fans counters, bit2 = related info.
                script_id = 0x0
                intro_wrapper = None
                num_wrapper = None
                relates_wrapper = None
                # NOTE(review): BeautifulSoup is called here without an
                # explicit parser (unlike 'lxml' elsewhere), which emits a
                # parser warning and may pick a different default.
                for script in BeautifulSoup(trtopic_page.text).find_all('script'):
                    script = str(script)
                    if script_id & 0x1 != 1 and script.find('Pl_Third_Inline__3') != -1:
                        print("Found one")
                        intro_wrapper = BeautifulSoup(loads(match(r'.*(\{.*\}){1}.*',
                                 script).group(1))['html'], 'lxml')
                        script_id += 1
                    if script_id & 0x2 != 0x2 and 'Pl_Core_T8CustomTriColumn__12' in script:
                        num_wrapper = BeautifulSoup(loads(match(r'.*(\{.*\}){1}.*',
                                 script).group(1))['html'], 'lxml')
                        script_id += 2
                    if script_id & 0x4 != 0x4 and 'Pl_Core_T5MultiText__31' in script:
                        relates_wrapper = BeautifulSoup(loads(match(r'.*(\{.*\}){1}.*',
                                 script).group(1))['html'], 'lxml')
                        script_id += 4
                # NOTE(review): if any of the three widgets was missing the
                # corresponding wrapper stays None and the navigation below
                # raises AttributeError.
                print(intro_wrapper.prettify())
                tr.description = intro_wrapper.div.div.div.div.p.contents[1]
                # Walk into the counters table: readings / discussion / fans.
                num_wrapper = num_wrapper.div.next_sibling.div.div.div.table.tbody.tr.td
                tr.num_readings = crawler.utils.strToInteger(num_wrapper.strong.contents[0])
                tr.num_discussion = crawler.utils.strToInteger(
                        num_wrapper.next_sibling.strong.contents)
                tr.num_fans = crawler.utils.strToInteger(
                        num_wrapper.next_sibling.next_sibling.strong.contents[0])
                # Related-info list: category, district, then the tag links.
                relates_wrapper = relates_wrapper.div.div.div.div.next_sibling.div.ul.li
                tr.categories = relates_wrapper.span.next_sibling.a.contents[1]
                tr.district = relates_wrapper.next_sibling.span.next_sibling.a.contents[1]
                relates_wrapper = relates_wrapper.next_sibling.next_sibling.span.next_sibling
                print(relates_wrapper)
                # Collect tags as a comma-separated string.
                for tag in relates_wrapper.children:
                    tr.tags += tag.contents[1] + ','
                tr.tags = tr.tags[:-1]
                
            else:
                #Network error happens, try to restore message.
                # Fall back to whatever the listing page itself exposes.
                tr.categories = title_box.a.next_sibling.contents[1]
                tr.description = title_box.next_sibling.contents
                tr.num_readings = crawler.utils.strToInteger(subinfo_box.span.span.span.contents[0])
            trtopics[tr.topic] = tr
        print(trtopics)
        return trtopics 
    
class TrendingTopic:
    """
    A formal representation for a single trending topic.

    Attributes mirror the fields scraped from a weibo trending-topic page:
    rank, title and link, presenter, read/discussion/fan counts, category,
    tags and district.
    """
    def __init__(self):
        self.rank = 0                    # position on the trending list
        self.description = ''            # short topic summary
        self.topic = ''                  # topic title
        self.topic_link = ''             # URL of the topic page
        self.topic_presenter = ''        # display name of the topic host
        self.topic_presenter_link = ''   # URL of the host's profile
        self.num_readings = 0            # total read count
        self.num_discussion = 0          # total discussion count
        self.num_fans = 0                # follower count
        self.categories = ''             # topic category label
        self.tags = ''                   # comma-separated tag list
        self.district = ''               # geographic region label

    def __str__(self):
        return '{} {}\n{}\n{} {} {} {}\n{}\n{} {} {}'.format(self.rank, self.topic,
                self.description, 
                self.num_readings, self.num_discussion, self.num_fans, self.topic_presenter, 
                self.topic_presenter_link, 
                self.categories, self.district, self.tags)

    @staticmethod
    def randomTrtopic():
        """
        Build a TrendingTopic filled with fake data, useful for exercising
        downstream consumers without hitting the network.
        """
        # Imported lazily: faker is a test-only dependency.
        from faker import Faker
        fake = Faker()
        tr = TrendingTopic()
        tr.rank = 10
        tr.description = fake.text(max_nb_chars=50)
        tr.topic = fake.text(max_nb_chars=50)
        tr.topic_link = fake.text(max_nb_chars=50)
        tr.topic_presenter = fake.text(max_nb_chars=50)
        tr.topic_presenter_link = fake.text(max_nb_chars=50)
        tr.categories = fake.text(max_nb_chars=50)
        tr.tags = fake.text(max_nb_chars=50)
        tr.district = fake.text(max_nb_chars=50)
        tr.num_readings = 100
        tr.num_discussion = 100
        # BUG FIX: the original assigned num_discussion twice and left
        # num_fans at its default of 0.
        tr.num_fans = 100
        return tr

class ConstantsPool:
    """
    Constants consumed by the crawler. These constants are captured by fiddler. Maybe changed 
    later. If sina changes it, you need to recapture the package and analyse the web 
    interface by your own to replace the deprecated constants.
    """
    # BUG FIX: the original used a backslash continuation *inside* the string
    # literal, which embedded the next line's indentation (a run of literal
    # spaces) into the URL. Adjacent string literals concatenate cleanly.
    PRE_LOGIN = ('https://login.sina.com.cn/sso/prelogin.php?'
            'checkpin=1&entry=mweibo&su=YW5nbGVuZXQlNDBzaW5hLmNu')
    LOGIN = 'https://passport.weibo.cn/sso/login'
    SINA_COM = 'http://weibo.com'
    INDEX_HOT_NEW = 'http://d.weibo.com/100803?refer=index_hot_new'
    MOBILE_SINA_CN = 'http://m.weibo.cn'
    # Default request headers installed on the crawler's session.
    HEADERS = {
            'accept-language': 'en-US,en;q=0.8',
            'connection': 'keep-alive',
            # requests requires header values to be str, not int.
            'DNT': '1',
            # Same continuation fix as PRE_LOGIN: the User-Agent no longer
            # carries a run of literal spaces in the middle.
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                    'AppleWebKit/537.36 (KHTML, like Gecko) '
                    'Chrome/51.0.2704.106 Safari/537.36',
            }
    # Path fragments for the ranking pages; disabled entries are kept for
    # reference and can be re-enabled when needed.
    trtopic_link_prefix = {
            #Rank 5
            'topic_rank': '/100803_-_page_hot_list', 
            #'local_topic': '/100803_-_page_local_list',
            #'body_building':        '/100803_ctg1_111_-_ctg1111',
            #'health':               '/100803_ctg1_113_-_ctg1113',
            #'fashion':              '/100803_ctg1_114_-_ctg1114',
            #'baby':                 '/100803_ctg1_116_-_ctg1116',
            #'cars':                 '/100803_ctg1_117_-_ctg1117',
            #'daily_life':           '/100803_ctg1_120_-_ctg1120',
            #'beautify_photo':       '/100803_ctg1_123_-_ctg1123',
            #'games':                '/100803_ctg1_126_-_ctg1126',
            #'lovely_pet':           '/100803_ctg1_128_-_ctg1128',
            #'digital_device':       '/100803_ctg1_131_-_ctg1131',
            #'technology':           '/100803_ctg1_138_-_ctg1138',
            #'joke':                 '/100803_ctg1_140_-_ctg1140',
            #'military':             '/100803_ctg1_144_-_ctg1144',
            #'music':                '/100803_ctg1_146_-_ctg1146',
            #'education':            '/100803_ctg1_133_-_ctg1133',
            #'realstate':            '/100803_ctg1_137_-_ctg1137',
            #'culture':              '/100803_ctg1_142_-_ctg1142',
            #'constellation':        '/100803_ctg1_145_-_ctg1145',
            #'government':           '/100803_ctg1_147_-_ctg1147',
            #'rank4_starts': None,
            ##Rank 4
            #'society':              '/100803_ctg1_1_-_ctg11',
            #'stars':                '/100803_ctg1_2_-_ctg12',
            #'popular_science':      '/100803_ctg1_3_-_ctg13',
            #'sentiment':            '/100803_ctg1_5_-_ctg15',
            #'finance':              '/100803_ctg1_7_-_ctg17',
            #'commonweal':           '/100803_ctg1_6_-_ctg16',
            #'call_for_creativity':  '/100803_ctg1_9_-_ctg19',
            #'food':                 '/100803_ctg1_91_-_ctg191',
            #'travel':               '/100803_ctg1_93_-_ctg193',
            #'readings':             '/100803_ctg1_94_-_ctg194',
            #'animation':            '/100803_ctg1_97_-_ctg197',
            #'sports':               '/100803_ctg1_98_-_ctg198',
            #'movies':               '/100803_ctg1_100_-_ctg100',
            #'tvshow':               '/100803_ctg1_101_-_ctg101',
            #'variety_show':         '/100803_ctg1_102_-_ctg102',
            }
    # City ids for the local-topic ranking (tr_type == 2).
    city_codes = {
            'beijing':      '001011001',
            #'tianjin':      '001012001',
            #'shijiazhuang': '001013001',
            #'taiyuan':      '001014001',
            #'huhehaote':    '001015001',
            #'shengyang':    '001021001',
            #'changchun':    '001022001',
            #'haerbing':     '001023001',
            #'shanghai':     '001031001',
            #'nanjing':      '001032001',
            #'hangzhou':     '001033001',
            #'hefei':        '001034001',
            #'fuzhou':       '001035001',
            #'nanchang':     '001036001',
            #'jinan':        '001037001',
            #'zhengzhou':    '001041001',
            #'wuhan':        '001042001',
            #'changsha':     '001043001',
            #'guangzhou':    '001044001',
            #'nanning':      '001045001',
            #'haikou':       '001046001',
            #'chongqing':    '001050001',
            #'chengdu':      '001051001',
            #'guiyang':      '001052001',
            #'kunming':      '001053001',
            #'lasa':         '001054001',
            #'xian':         '001061001',
            #'lanzhou':      '001062001',
            #'xining':       '001063001',
            #'yinchuan':     '001064001',
            #'xinjiang':     '001065001',
            #'taiwan':       '001071001',
            #'xianggang':    '001081001',
            #'aomen':        '001082001',
            #'dalian':       '001021002',
            #'wuxi':         '001032002',
            #'xiamen':       '001035002',
            #'qingdao':      '001037002',
            #'suzhou':       '001032005',
            }
    # NOTE(review): not referenced anywhere in this module — confirm whether
    # an external consumer writes/reads it before removing.
    current_timestamp = ''



