#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/27 15:59
# @Author : CoderXYX
# @Site : 
# @File : weibo_houlai_spider.py
# @Software: PyCharm

import re
import os
import json
import time
import random

import requests
import jieba.analyse
from pyecharts import options as opts
from pyecharts.globals import SymbolType
from pyecharts.charts import WordCloud

# TrueType font file used when rendering the word cloud
WC_FONT_PATH = '/Fonts/STXINWEI.TTF'
# Output file where scraped Weibo post texts are accumulated
TOPIC_FILE_PATH = 'weibo_houlai_topic.txt'
# Stop-word list used to filter noise words before keyword extraction
STOP_WORDS_FILE_PATH = 'stop_words.txt'

# Shared Session object so login cookies persist across all requests
s = requests.Session()


def login_sina():
    """
    Log in to Sina Weibo via the mobile passport SSO endpoint.

    On success the module-level session ``s`` holds the login cookies, so
    subsequent requests made through ``s`` are authenticated.

    :return: 1 on success, 0 on failure (ints kept for backward compatibility).
    """
    # Login URL (mobile-web SSO endpoint)
    login_url = 'https://passport.weibo.cn/sso/login'
    # Request headers; Referer is required by the passport endpoint
    headers = {'user-agent': 'Mozilla/5.0',
               'Referer': 'https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=https%3A%2F%2Fm.weibo.cn%2F'}
    # SECURITY: credentials are hard-coded and committed with the source.
    # Move them to environment variables or an untracked config file.
    data = {'username': 'xue752834620@126.com',
            'password': 'byzlhd1124HUST',
            'savestate': 1,
            'entry': 'mweibo',
            'mainpageflag': 1}
    try:
        r = s.post(login_url, headers=headers, data=data)
        r.raise_for_status()
    except requests.RequestException:
        # Catch only requests errors — a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit and hide programming errors.
        print('登录请求失败')
        return 0
    # Print the response object (status) for a quick sanity check
    print(r)
    return 1


def spider_topic(page):
    """
    Scrape one result page of the Weibo topic "#我们都在奔赴各自不同的人生#"
    and append the cleaned post texts to TOPIC_FILE_PATH.

    :param page: 1-based result-page number to fetch.
    :return: 1 if the page contained posts, 0 if it was empty (end of
             results), None if the HTTP request failed.
    """
    topic_url = 'https://m.weibo.cn/api/container/getIndex?containerid=231522type%3D61%26q%3D%23%E6%88%91%E4%BB%AC%E9%83%BD%E5%9C%A8%E5%A5%94%E8%B5%B4%E5%90%84%E8%87%AA%E4%B8%8D%E5%90%8C%E7%9A%84%E4%BA%BA%E7%94%9F%23%26t%3D0&page_type=searchall&page={0}'.format(
        page)
    kv = {'User-Agent': 'Mozilla/5.0',
          'Referer': 'https://m.weibo.cn/search?containerid=231522type%3D1%26q%3D%23%E6%88%91%E4%BB%AC%E9%83%BD%E5%9C%A8%E5%A5%94%E8%B5%B4%E5%90%84%E8%87%AA%E4%B8%8D%E5%90%8C%E7%9A%84%E4%BA%BA%E7%94%9F%23'}
    try:
        # timeout prevents a hung request from blocking the crawl forever
        r = s.get(url=topic_url, headers=kv, timeout=5)
        r.raise_for_status()
    except requests.RequestException:
        # Narrow except: only network/HTTP errors mean "fetch failed"
        print('爬取失败')
        return
    # 2. Parse the JSON payload; .get() guards against a missing/partial
    # 'data' section so an odd response ends the crawl instead of crashing.
    r_json = r.json()
    cards = r_json.get('data', {}).get('cards', [])

    if not cards:
        print('爬取完毕')
        return 0

    # Compile once per call, outside the per-card loop
    tag_pattern = re.compile(r'<[^>]+>', re.S)  # strips HTML tags
    topic_pattern = re.compile(r'#.*?#')        # strips "#topic#" markers
    # Collect this page's posts so they can be written in a single call
    text_list = []
    for card in cards:
        # Extract the post body
        mblog = card['mblog']
        # Remove HTML tags, keeping only the text content
        text = tag_pattern.sub(' ', mblog['text'])
        # Remove the leading "#topic#" boilerplate
        text = topic_pattern.sub('', text).strip()
        text_list.append(text)
        print(text)
    # Write the whole page at once. BUG FIX: append a trailing newline so
    # consecutive pages do not run together on a single line when the file
    # is opened in append mode on the next call.
    with open(TOPIC_FILE_PATH, 'a+', encoding='utf-8') as file:
        file.write('\n'.join(text_list) + '\n')
    return 1


def patch_spider_topic():
    """
    Batch-crawl the topic: log in, reset the output file, then fetch pages
    1..99, stopping early when a page comes back empty.
    """
    # Log in first; skip crawling entirely if login fails
    if not login_sina():
        return
    # Start from a clean file so repeated runs do not duplicate data
    if os.path.exists(TOPIC_FILE_PATH):
        os.remove(TOPIC_FILE_PATH)
    # Batch crawl, one page per iteration
    for i in range(1, 100):
        print('第%d页' % i)
        print('--------------------------------------------------------------------------')
        result = spider_topic(i)
        # Random delay between requests to avoid hammering the server
        time.sleep(random.randint(1, 3))
        print('--------------------------------------------------------------------------')
        # spider_topic returns 0 only when a page is empty (end of results);
        # None (a failed request) deliberately does NOT stop the loop.
        if result == 0:
            break


def analysis_sina_content():
    """
    Analyse the scraped Weibo texts: extract the top keywords via TextRank
    and render them as an HTML word cloud (word_cloud.html).
    """
    # Load everything the spider collected
    with open(TOPIC_FILE_PATH, mode="r", encoding='utf-8') as f:
        corpus = f.read()
    # Clean the data: drop stop words before keyword extraction
    jieba.analyse.set_stop_words(STOP_WORDS_FILE_PATH)
    # Top-50 keywords together with their TextRank weights
    keywords = jieba.analyse.textrank(corpus, topK=50, withWeight=True)
    print(keywords)
    # Build and render the word cloud (pyecharts calls return the chart,
    # so statement-by-statement configuration is equivalent to chaining)
    chart = WordCloud()
    chart.add("", keywords, word_size_range=[20, 100], shape=SymbolType.ROUND_RECT)
    chart.set_global_opts(title_opts=opts.TitleOpts(title="后来—我们都在奔赴各自不同的人生"))
    chart.render('word_cloud.html')


if __name__ == '__main__':
    # Uncomment the step(s) to run; the crawl steps need network access.
    # login_sina()            # test login only
    # spider_topic()          # fetch a single page (requires a page argument)
    # patch_spider_topic()    # full batch crawl
    analysis_sina_content()
    # create_word_cloud()     # NOTE(review): not defined in this file
