# This file fetches the hotlist of each of eight pages — Home, General, Tech,
# Entertainment, Community, Finance, Development, Press — and returns them as a data list.
import pandas as pd
from rouge import Rouge
import datetime
import pymysql
import requests
import time
import pandas
import re
import xlwt
import to_sql #写入数据库函数

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from urllib import request
from urllib import parse
from bs4 import BeautifulSoup


from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
rouge=Rouge()

# Functions that fetch every hotlist on the homepage
def get_html(url):
    """Fetch *url* and return the response body as text.

    A browser-like User-Agent header is sent because aggregator sites
    commonly reject the default requests client UA.

    Raises requests.HTTPError on a non-2xx response and
    requests.Timeout if the server does not answer within 10 seconds.
    """
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
    # timeout keeps the crawler from hanging forever on a dead host;
    # raise_for_status surfaces HTTP errors instead of silently
    # handing an error page to the HTML parser downstream
    resp = requests.get(url, headers=headers, timeout=10)
    resp.raise_for_status()
    return resp.text


def get_data(html):
    """Parse *html* and return all hot-list card containers.

    Each returned node is a ``<div class="cc-cd">`` element holding one
    source's ranking card on the page.
    """
    page = BeautifulSoup(html, 'html.parser')
    return page.find_all('div', class_='cc-cd')


def get_node_data(df, nodes):
    """Extract hot-list entries from the card *nodes* and append them to *df*.

    For each card, the source name and icon URL are read from the
    ``cc-cd-lb`` label, then one row is produced per linked headline
    (anchors whose ``<span class="t">`` title span exists).

    Returns a new DataFrame with columns content / url / source /
    image_icon; *df* itself is not mutated.
    """
    rows = []
    for node in nodes:
        label = node.find('div', class_='cc-cd-lb')
        source = label.text.strip()
        image = label.find('img')['src']
        messages = node.find('div', class_='cc-cd-cb-l nano-content').find_all('a')
        for message in messages:
            # cache the span lookup — the original code searched twice
            title = message.find('span', class_='t')
            if title:
                rows.append({
                    'content': title.text.strip(),
                    'url': message['href'],
                    'source': source,
                    'image_icon': image,
                })
    # single concat instead of one per row: concatenating inside the loop
    # copies the whole accumulated frame every iteration (O(n^2))
    if rows:
        df = pd.concat([df, pd.DataFrame(rows)], ignore_index=True)
    return df



# Fetch the DataFrame of hot-list entries for the aggregated pages
# (General 1, Tech 2, Entertainment 3, Community 4, Finance 5, Dev 6, Press 7)
def get_hotlist():
    """Crawl the tophub.today front page and return all hot-list entries.

    Returns a DataFrame with columns content / url / source / image_icon,
    one row per headline across every portal card on the page.
    """
    # tophub.today aggregates rankings from many portal sites on one page
    homepage = get_html('https://tophub.today/')
    cards = get_data(homepage)
    return get_node_data(pd.DataFrame(), cards)


