import logging
import os
import re
import traceback
from time import sleep

import emoji
from bs4 import BeautifulSoup
from requests_html import HTMLSession
from General.Mapper import *
import datetime


def strCount_To_intCount(str_count):  # convert a count string with Chinese unit suffixes to an int
    """Parse a Weibo count string such as '1.2万' or '3亿' into an integer.

    Supported suffixes (checked longest first): 万亿, 亿, 万, 千.
    A plain digit string is returned as-is; anything else yields None.
    """
    # Longest suffix first so that '万亿' is not mistaken for a bare '亿'.
    for suffix, factor in (('万亿', 10 ** 12), ('亿', 10 ** 8), ('万', 10 ** 4), ('千', 10 ** 3)):
        if str_count.endswith(suffix):
            return int(float(str_count[:-len(suffix)]) * factor)
    if str_count[-1:].isdigit():
        return int(str_count)


'''def filtrate_emoji(s):
    print(s)
    s = emoji.demojize(s)
    print(s)
    return s'''

'''def filtrate_emoji(s):  # 过滤表情
    res = re.compile(u'[\U00010000-\U0010ffff\\uD800-\\uDBFF\\uDC00-\\uDFFF]')
    s = res.sub('', s)
    return s'''


def filtrate_emoji(s):  # strip emoji from a string
    """Remove emoji from *s*.

    emoji.demojize() first rewrites every emoji as a ':alias:' token,
    then the regex deletes those tokens, leaving plain text only.
    """
    s = emoji.demojize(s)  # encode emoji as ':alias:' placeholders
    # BUG FIX: the pattern used '\:' which is an invalid escape sequence
    # (SyntaxWarning on modern CPython); a raw string with plain ':' matches identically.
    s = re.sub(r'(:.*?:)', '', s)
    return s


class Topic:
    """One entry of the Weibo hot-topic board.

    Holds the raw scraped strings; the get_* accessors expose them,
    converting ranking and read count to integers on demand.
    """

    def __init__(self, name, des, host, ranking, read_count, date, tag):
        self.name = name                # topic title (emoji already stripped by caller)
        self.des = des                  # short description
        self.host = host                # host account name, or '无' when absent
        self.ranking = ranking          # rank as a digit string
        self.read_count = read_count    # read count as scraped, may carry a unit suffix
        self.date = date                # crawl timestamp
        self.tag = tag                  # category label

    def get_name(self):
        """Return the topic title."""
        return self.name

    def get_des(self):
        """Return the topic description."""
        return self.des

    def get_host(self):
        """Return the host name ('无' when the board listed none)."""
        return self.host

    def get_ranking(self):
        """Return the rank as an int."""
        return int(self.ranking)

    def get_read_count(self):
        """Return the read count as an int (Chinese unit suffixes expanded)."""
        return strCount_To_intCount(str(self.read_count))

    def get_date(self):
        """Return the crawl timestamp."""
        return self.date

    def get_tag(self):
        """Return the category label."""
        return self.tag

    def get_all(self):
        """Return the raw fields as a list: rank, name, des, host, reads, date."""
        return [self.ranking, self.name, self.des, self.host, self.read_count, self.date]


def Crawl_first_two_pages_html():
    """Fetch and JS-render the first two pages of the Weibo hot-topic board.

    Returns a list of two rendered ``requests_html`` HTML objects.
    Requires the module-level ``logger`` configured by the entry point.
    """
    rendered_pages = []
    session = HTMLSession()  # one session reused for both requests
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/76.0.3809.132 Safari/537.36 ",
        'Cookie': 'SINAGLOBAL=5518713658911.554.1606869789556; UOR=www.baidu.com,s.weibo.com,login.sina.com.cn; '
                  'SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhI_ZTKTKrx3aVIQbmjgWJ65JpX5KMhUgL.FoqfS0q4e0'
                  '.X1K52dJLoIpzLxKqL1K.L1-2LxK.LBKzL1-Bt; ALF=1640070892; SSOLoginState=1608534890; '
                  'SCF=AmPaLkTvY32a2VbIRzA3iIcSg6A8X38y6BrJE2wyminWfEIguLXWoIG9qQiBiigJO7alQ9Cn3eef14BreQI8FXc.; '
                  'SUB=_2A25y5Ds9DeRhGeBL7FQY8yfIwjyIHXVRkCv1rDV8PUNbmtANLXjlkW9NRs0N0ia1ycLXybZGLLejo5Ti11znwI0d; '
                  '_s_tentry=login.sina.com.cn; Apache=144275179328.85437.1608534894739; '
                  'ULV=1608534894744:10:10:4:144275179328.85437.1608534894739:1608474027650; '
                  'webim_unReadCount=%7B%22time%22%3A1608534897861%2C%22dm_pub_total%22%3A0%2C%22chat_group_client%22'
                  '%3A0%2C%22chat_group_notice%22%3A0%2C%22allcountNum%22%3A0%2C%22msgbox%22%3A0%7D'  # logged-in (non-guest) cookie
    }
    """'Cookie': 'SINAGLOBAL=5518713658911.554.1606869789556; UOR=www.baidu.com,s.weibo.com,login.sina.com.cn; ' 
    'SCF=AmPaLkTvY32a2VbIRzA3iIcSg6A8X38y6BrJE2wyminWwxuywzPZgnQwfddk3O5_m8UhonyUWQNCrLgk7EDdlBI.; 
    SUB=_2AkMohPpudcPxrAVUn_4cz2LmZY1H-jybUZOYAn7uJhMyAxh87mwmqSVutBF-XMBpvjsZAhVtaxdAnvI3eznsZz9G; 
    SUBP=0033WrSXqPxfM72wWs9jqgMF55529P9D9WhI_ZTKTKrx3aVIQbmjgWJ65JpVF02R1h-NSh-Eehnp; _s_tentry=-; ' 
    'Apache=3040100749269.341.1608296369929; ' 
    'ULV=1608296369975:6:6:4:3040100749269.341.1608296369929:1608021578230; ' 
    'login_sid_t=76a7a4d086521fee02991f7480276119; cross_origin_proto=SSL; ' 
    'YF-Page-G0=d30fd7265234f674761ebc75febc3a9f|1608302165|1608302463 '  # guest cookie alternative """
    for page_no in range(1, 3):
        logger.info("Crawling page %d!" % page_no)
        response = session.get(
            'https://d.weibo.com/231650?cfs=920&Pl_Discover_Pt6Rank__3_filter=&Pl_Discover_Pt6Rank__3_page=' + str(
                page_no) + '#Pl_Discover_Pt6Rank__3',
            headers=headers)
        response.html.render(timeout=10, sleep=10)  # run Chromium so the page JS fully renders
        response.encoding = 'utf-8'
        rendered_pages.append(response.html)
    session.close()
    return rendered_pages


def Find_ranking_list_tags(pages):
    """Collect every ranking-item tag (class 'pic_txt clearfix') from the rendered pages.

    Args:
        pages: rendered page objects exposing an ``html`` attribute.

    Returns:
        list: BeautifulSoup tags, one per board entry, in page order.
    """
    tags = list()
    # enumerate instead of pages.index(page): index() rescans the list every
    # iteration and reports the wrong position if two pages compare equal.
    for page_no, page in enumerate(pages, start=1):
        logger.info("Analysing page " + str(page_no))
        soup = BeautifulSoup(page.html, "lxml")
        tags.extend(soup.find_all(class_="pic_txt clearfix"))  # all board-entry tags on this page
    return tags


def Fetch_whats_needed(tags):
    """Build Topic objects from the first 20 ranking-item tags.

    Args:
        tags: BeautifulSoup tags, one per board entry.

    Returns:
        list[Topic]: at most 20 topics, all stamped with the current time.
    """
    topics = list()
    topic_date = datetime.datetime.now()
    # Slice + enumerate replaces the old tags.index(tag) bookkeeping, which was
    # O(n) per step and broke (early break / wrong log index) on duplicate tags.
    for tag_no, tag in enumerate(tags[:20], start=1):  # keep only the top 20
        logger.info("Fetching tag " + str(tag_no))
        topic_read_count = tag.find(class_='number').get_text(strip=True)
        topic_ranking = tag.find(class_=['DSC_topicon_red', 'DSC_topicon_orange', 'DSC_topicon']).get_text(
            strip=True)
        topic_ranking = ''.join(filter(str.isdigit, topic_ranking))  # strip the 'top' letters around the rank
        if tag.find(class_='tlink S_txt1') is not None:  # the host entry may be missing
            topic_host = tag.find(class_='tlink S_txt1').get_text(strip=True)
        else:
            topic_host = '无'
        topic_des = tag.find(class_='subtitle').get_text(strip=True)
        topic_tag = tag.find(class_='W_btn_b W_btn_tag').get_text(strip=True)
        topic_name = tag.find(class_='pic')['alt'][1:-1]  # slice off the surrounding '#'
        topics.append(
            Topic(name=filtrate_emoji(topic_name), date=topic_date, des=filtrate_emoji(topic_des),
                  ranking=topic_ranking,
                  read_count=topic_read_count, host=topic_host, tag=topic_tag))
    return topics


def Save_to_database(topics):
    """Persist each topic: ensure its tag row, ensure its topic row, add a raw-data sample.

    Args:
        topics: list of Topic objects to store.
    """
    tag_mapper = TagMapper()
    topic_mapper = TopicMapper()
    raw_data_mapper = RawDataMapper()
    # enumerate instead of topics.index(topic): index() is O(n) per step and
    # logs the wrong position when two topics compare equal.
    for topic_no, topic in enumerate(topics, start=1):
        logger.info("Saving tag " + str(topic_no))
        # Reuse the tag row when it already exists, otherwise create it.
        if not tag_mapper.isTag(topic.get_tag()):
            tag_id = tag_mapper.addTag(topic.get_tag())
        else:
            tag_id = tag_mapper.getTagByName(topic.get_tag())

        # Same upsert pattern for the topic row.
        if not topic_mapper.isTopic(topic.get_name()):
            topic_id = topic_mapper.addTopic(topic.get_name(), topic.get_des(), tag_id, topic.get_host())
        else:
            topic_id = topic_mapper.getTopicByName(topic.get_name())
        raw_data_mapper.addRawData(topic.get_date(), topic.get_ranking(), topic.get_read_count(), topic_id)
        sleep(0.5)  # throttle writes to the database


def Get_top_search():
    """Scrape the Weibo realtime top-search list.

    Returns:
        list[tuple] of (rank, content, tail) with exactly 51 rows on success,
        or 0 on any failure (network error, parse error, wrong row count).
    """
    try:
        session = HTMLSession()  # fresh session for this one request
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/76.0.3809.132 Safari/537.36 ",
            'Cookie': 'SINAGLOBAL=5518713658911.554.1606869789556; '
                      'SCF=AmPaLkTvY32a2VbIRzA3iIcSg6A8X38y6BrJE2wyminW2uRUUenrp1PMzcWGHffRTqyHBu64dK9eTsNtGeLkGTo.; '
                      '_s_tentry=login.sina.com.cn; Apache=3284140291002.333.1608984972819; '
                      'ULV=1608984972831:14:14:8:3284140291002.333.1608984972819:1608822014386; '
                      'WBStorage=8daec78e6a891122|undefined; '
                      'SUB=_2AkMou6UtdcPxrAVUn_4cz2LmZY1H-jybbszbAn7uJhMyAxh87gcpqSVutBF-XC0337lIfPRvTeugfWbMCKNRlqRl; '
                      'SUBP=0033WrSXqPxfM72wWs9jqgMF55529P9D9WhI_ZTKTKrx3aVIQbmjgWJ65JpVF02R1h-NSh-Eehnp; '
                      'login_sid_t=bd052ee79595e75e067ae5d84110f5c7; cross_origin_proto=SSL; '
                      'webim_unReadCount=%7B%22time%22%3A1608985118754%2C%22dm_pub_total%22%3A0%2C%22chat_group_client%22'
                      '%3A0%2C%22chat_group_notice%22%3A0%2C%22allcountNum%22%3A0%2C%22msgbox%22%3A0%7D; '
                      'UOR=www.baidu.com,s.weibo.com,www.baidu.com '  # guest cookie
        }
        # BUG FIX: headers (with the guest cookie) were built but never sent.
        top_search_web = session.get('https://s.weibo.com/top/summary', headers=headers)
        session.close()
        soup = BeautifulSoup(top_search_web.html.html, "lxml")
        search_list = soup.find(class_="data")
        rank_list = [tag.get_text(strip=True)
                     for tag in search_list.findAll(class_='td-01')]
        content_list = [filtrate_emoji(tag.find('a').get_text(strip=True))
                        for tag in search_list.findAll(class_='td-02')]
        tail_list = [tag.get_text(strip=True)
                     for tag in search_list.findAll(class_='td-03')]
    except Exception:
        # Deliberate best-effort: callers treat 0 as "no data this round".
        return 0
    rows = list(zip(rank_list, content_list, tail_list))  # build once, not twice
    return rows if len(rows) == 51 else 0


if __name__ == '__main__':
    # Set up a timestamped file logger under ../Logs/DataCrawlingLogs/.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    rq = datetime.datetime.now().strftime('%Y%m%d%H%M')
    log_path = os.path.dirname(os.getcwd()) + '/Logs/DataCrawlingLogs/'
    os.makedirs(log_path, exist_ok=True)  # FileHandler raises if the directory is missing
    log_name = log_path + rq + '.log'
    fh = logging.FileHandler(log_name, mode='w+')
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    MAX_ATTEMPTS = 5  # crawl retry budget
    flag = MAX_ATTEMPTS
    while flag > 0:
        try:
            # BUG FIX: the attempt number was computed as (3 - flag + 1) from an
            # old budget of 3, producing negative attempt numbers with flag = 5.
            logger.info("Trying,%dth time" % (MAX_ATTEMPTS - flag + 1))
            logger.info("Crawl_first_two_pages_html is running!")
            page_top_2 = Crawl_first_two_pages_html()
            if len(page_top_2) == 2:
                logger.info("Crawl_first_two_pages_html complete!")
            else:
                logger.error(
                    "Some errors happened in Crawl_first_two_pages_html!\tExpected 2 pages,but get %d pages\n" % len(
                        page_top_2))
                flag -= 1
                continue
            logger.info("Find_ranking_list_tags is running!")
            tags_top_30 = Find_ranking_list_tags(page_top_2)
            if len(tags_top_30) == 30:
                logger.info("Find_ranking_list_tags complete!")
            else:
                logger.error(
                    "Some errors happened in Find_ranking_list_tags!\tExpected 30 tags,but get %d tags\n" % len(
                        tags_top_30))
                flag -= 1
                continue

            logger.info("Fetch_whats_needed is running!")
            topic_top_20 = Fetch_whats_needed(tags_top_30)
            if len(topic_top_20) == 20:
                logger.info("Fetch_whats_needed complete!")
            else:
                logger.error(
                    "Some errors happened in Fetch_whats_needed!\tExpected 20 topics,but get %d topics\n" % len(
                        topic_top_20))
                flag -= 1
                continue

            logger.info("Save_to_database is running!")
            Save_to_database(topic_top_20)
            logger.info("Save_to_database complete!")
            flag = -1  # success: leave the retry loop

        except Exception:
            logger.error("There have an ERROR!\n" + traceback.format_exc())
            flag -= 1
            continue

# print('***********************\n', hot_topic_tags[0].prettify())
'''list_file = open('topic backup//hot_topics_' + str(time.strftime("%Y-%m-%d_%H.%M.%S", time.localtime())) + '.txt', 'w+',
                 encoding='utf-8')
for topic in topic_top_20:
    list_file.write(' '.join(topic.get_all()))
list_file.close()'''
