import requests
import re
import traceback
from bs4 import BeautifulSoup   # HTML parser
from datetime import datetime
import os
import sqlite3

from matplotlib.pyplot import title


# url = 'https://roll.news.sina.com.cn/news/gnxw/gdxw1/index.shtml'
#
#
# def getnewspage(url):
#     res = requests.get(url)
#     res.encoding = 'utf-8'
#     return res.text
#
#
# def savepage(page, filename):
#     f = open(filename, 'wb')
#     f.write(page.encode('utf-8'))
#     f.close()


# def get_news(htmlfile):
#     news_dict = {}
#     try:
#         f = open(htmlfile, encoding='utf-8')
#         fcontent = f.read()
#         soup = BeautifulSoup(fcontent, 'lxml')
#         news_title = soup.select('h1.main-title')[0].text.strip()
#         news_dict['title'] = news_title
#         nt = datetime.strptime(soup.select('span.date')[0].text, '%a %b %d %Y')
#         news_dict['date'] = nt.strftime('%Y-%m-%d')
#
#     except Exception as e:
#         print("抓取出错")
#         print(e)
#         traceback.print_exc()
#         return None
#     return news_dict
#
# def save_text(filename, newslist):
#     newstext = ''
#     for n in newslist:
#         newstext += n['article'] + '\r\n\n'
#
#
#     f = open(filename, 'wb')
#     f.write(newstext.encode('utf-8'))
#     f.close()
#
#
# if __name__ == '__main__':


