#!/usr/bin/python
import requests
from bs4 import BeautifulSoup
import pandas as pd
from wordcloud import WordCloud
import matplotlib.pyplot as plt

# Shared accumulator for scraped hot-search entries (dicts built by openUrl).
# NOTE(review): this name shadows the builtin `list`; renaming would be safer,
# but every function below references it by this exact name — confirm callers
# before changing.
list = []
# Concatenation of all scraped titles; used as the word-cloud input text.
wordList = ""


def openUrl(url):
    """Fetch the Baidu realtime hot-search board at *url* and parse each entry.

    For every entry the title, description, hot index, cover-image URL and
    detail link are collected into a dict, appended to the module-level
    ``list``, and the title text is accumulated into ``wordList``.

    Returns the shared ``list`` of entry dicts on success, or ``None`` when
    the HTTP response status is not 200 (a message is printed instead).
    """
    global wordList
    # A timeout keeps the script from hanging forever on a dead connection.
    response = requests.get(url, timeout=10)
    # Only parse a successful response.
    if response.status_code == 200:
        # Parse the page with BeautifulSoup's built-in HTML parser.
        soup = BeautifulSoup(response.text, "html.parser")
        title_tag = soup.title
        print("Title:", title_tag)

        # Each hot-search entry lives in one category-wrap_iQLoo container.
        p_tags = soup.find_all("div", class_="category-wrap_iQLoo")

        for tag in p_tags:
            title = tag.find("div", class_="c-single-text-ellipsis")
            content = tag.find("div", class_="hot-desc_1m_jR").get_text()
            # Strip the trailing "see more" link text from the description.
            content = content.replace("查看更多>", "")
            num = tag.find("div", class_="hot-index_1Bl1a")
            # Cover image is the sibling element right after the index div.
            img = tag.find("div", class_="index_1Ew5p").find_next_sibling().attrs.get("src")

            urls = tag.find("a", class_="look-more_3oNWC").attrs.get("href")

            # Named `entry` (not `map`) so the builtin is not shadowed.
            entry = {'title': title.get_text(), "content": content, "num": num.get_text(), "img": img, "url": urls}
            list.append(entry)
            wordList += str(title.get_text())

        return list
    else:
        print("请求失败，状态码:", response.status_code)


def downloadFile(items=None, path='C:\\Users\86183\Desktop\应用文件夹\data.csv'):
    """Write scraped hot-search entries to a CSV file.

    Parameters
    ----------
    items : list[dict] | None
        Entries with ``'title'``, ``'content'`` and ``'num'`` keys; defaults
        to the module-level ``list`` filled by ``openUrl``.
    path : str
        Destination CSV path; defaults to the original hard-coded location,
        so existing callers keep their behavior.
    """
    rows = items if items is not None else list
    # Strip the "see more" suffix defensively (openUrl already removes it).
    data = {
        '标题': [item["title"] for item in rows],
        '内容': [item["content"].replace("查看更多>", "") for item in rows],
        '热搜指数': [item["num"] for item in rows],
    }
    df = pd.DataFrame(data)
    # index=False drops the row-number column; GBK encoding keeps the file
    # readable by Excel on Chinese-locale Windows.
    df.to_csv(path, encoding='GBK', index=False)


def downloadFile2(items=None, path='output.xlsx'):
    """Write scraped hot-search entries to an Excel workbook.

    Parameters
    ----------
    items : list[dict] | None
        Entries with ``'title'``, ``'content'`` and ``'num'`` keys; defaults
        to the module-level ``list`` filled by ``openUrl``.
    path : str
        Output ``.xlsx`` path; defaults to ``'output.xlsx'`` as before.
    """
    rows = items if items is not None else list
    data = {
        '标题': [item["title"].replace("查看更多>", "") for item in rows],
        '内容': [item["content"] for item in rows],
        '热搜指数': [item["num"] for item in rows],
    }
    df = pd.DataFrame(data)
    # The context manager replaces the private writer._save() call (removed
    # from pandas' public surface) and guarantees the file is flushed and
    # closed even if writing raises.
    with pd.ExcelWriter(path) as writer:
        df.to_excel(writer, sheet_name='Sheet1', index=False)


def words(data):
    """Render and display a word cloud built from *data*.

    Falls back to the text in ``new5.txt`` when *data* is empty or
    whitespace-only; otherwise *data* itself (stringified) is used.
    """
    # str.isspace() returns False for "" — strip() covers both the empty and
    # the all-whitespace case, so an empty wordList uses the file fallback
    # instead of crashing WordCloud with no words to lay out.
    if not str(data).strip():
        # Read the fallback text file.
        with open("new5.txt", "r", encoding="utf-8") as f:
            text = f.read()
    else:
        text = str(data)
    # CJK-capable font — required to render Chinese glyphs in the cloud.
    font_path = 'STFANGSO.TTF'

    # Build the word cloud image from the chosen text.
    wordcloud = WordCloud(width=1000, height=800, background_color="white", font_path=font_path,
                          min_font_size=12).generate(text)

    # Display it with matplotlib, hiding the axes.
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    plt.show()


def do_image():
    """Scrape the Baidu realtime hot-search board and build a word cloud
    from the accumulated titles.

    Returns the generated WordCloud object (caller is responsible for
    rendering or saving it).
    """
    openUrl("https://top.baidu.com/board?tab=realtime")
    # Same rendering parameters as words(): white background, CJK font.
    cloud = WordCloud(
        width=1000,
        height=800,
        background_color="white",
        font_path='STFANGSO.TTF',
        min_font_size=12,
    ).generate(wordList)
    return cloud


if __name__ == '__main__':
    # Script entry point: scrape the realtime hot-search board, then render
    # a word cloud from the titles collected into wordList.
    openUrl("https://top.baidu.com/board?tab=realtime")
    words(wordList)
