# coding:utf-8
import urllib3
from urllib3 import PoolManager
from bs4 import BeautifulSoup
import pandas as pd
import json
# lid parameter values accepted by the Sina roll-news API (news category IDs):
#     "2509": 全部 (all)
#     "2510": 国内 (domestic)
#     "2511": 国际 (international)
#     "2669": 社会 (society)
#     "2512": 体育 (sports)
#     "2513": 娱乐 (entertainment)
#     "2514": 军事 (military)
#     "2515": 科技 (technology)
#     "2516": 财经 (finance)
news_type_id = "2515"     # category to crawl (technology); see lid mapping above
news_limit_count = 150    # stop after collecting this many article URLs

http = PoolManager()
urllib3.disable_warnings()  # suppress TLS certificate warnings from urllib3

# Page through the roll-news JSON feed (50 items per page) until we have
# enough article URLs or the feed runs out of data.
news_urls = []
page = 1
while len(news_urls) < news_limit_count:
    url = ("https://feed.mix.sina.com.cn/api/roll/get?pageid=153&lid=%s&k=&num=50&page=%d"
           % (news_type_id, page))
    result = http.request(method="GET", url=url)
    json_data = json.loads(result.data.decode())
    items = json_data["result"]["data"]
    if not items:
        # Feed exhausted before reaching the limit — without this guard the
        # original loop would spin forever requesting empty pages.
        break
    news_urls.extend(item["url"] for item in items)
    page += 1
news_urls = news_urls[:news_limit_count]

# Fetch each article page and extract title / publish time / body text.
# Pages that do not follow the standard article layout are skipped.
news_items = []
for news_url in news_urls:
    result = http.request(method="GET", url=news_url)
    soup = BeautifulSoup(result.data.decode(), "html.parser")
    title_tag = soup.find("h1", attrs={"class": "main-title"})
    if title_tag is None:
        continue  # not a standard article page (e.g. video/slideshow) — skip
    date_tag = soup.find("span", attrs={"class": "date"})
    body_tag = soup.find("div", attrs={"class": "article"})
    if date_tag is None or body_tag is None:
        # Original code would raise AttributeError here when the title exists
        # but the date/body elements are missing; skip such pages instead.
        continue
    news_items.append([
        title_tag.text.strip(),
        date_tag.text.strip(),
        body_tag.text.strip(),
        news_url,
    ])

# Columns: title, publish time, article body, source URL.
df = pd.DataFrame(news_items, columns=["标题", "发布时间", "新闻内容", "链接地址"])
df.to_excel("d:/新闻.xlsx")
