# Fetch the CCTV daily "8 o'clock news" digest and push it to a WeChat Work webhook
import requests
from bs4 import BeautifulSoup
import itertools,json

### Build the search URL to locate the newest "8点见" (8 o'clock news) article.
# Query CCTV's site search sorted by date; the first hit is today's article.
url = 'https://search.cctv.com/search.php?qtext=8%E7%82%B9%E8%A7%81&page=1&type=web&sort=date&datepid=1&channel=&vtime=-1'
# timeout keeps the script from hanging forever on a stalled connection
response = requests.get(url, timeout=10)
response.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
response.encoding = 'utf-8'  # force UTF-8 in case the server mislabels the charset
content = response.text

# Parse the search-results page
soup = BeautifulSoup(content, 'html.parser')

# The first element with class 'tit' is the newest result; its <span>
# carries the article URL in the custom 'lanmu1' attribute.
link_element = soup.find(class_='tit')
if link_element is None or link_element.span is None:
    raise RuntimeError('Could not find the latest news link on the CCTV search page')
link = link_element.span.attrs

# Target article URL for today's news
url = link["lanmu1"]

# Message header (kept verbatim — it is part of the outgoing message text)
result_str = "【央视早8点】"
# timeout keeps the script from hanging forever on a stalled connection
response = requests.get(url, timeout=10)
response.raise_for_status()  # fail fast on HTTP errors
response.encoding = 'utf-8'  # force UTF-8 in case the server mislabels the charset

# Parse the article page
soup = BeautifulSoup(response.text, 'html.parser')

# Extract the article body; guard against layout changes on the page
content_div = soup.find('div', class_='content_area')
if content_div is None:
    raise RuntimeError('Could not find the article body (div.content_area)')
news_content = content_div.get_text()

# News items are separated by the '·' bullet character.
# Index 0 is the text before the first '·' (typically the headline/date
# preamble); numbering therefore starts at 0, matching the original output.
lines = news_content.split('·')

# Number the first 21 segments; build the list once and join, instead of
# quadratic string += concatenation in the loop.
numbered = [f"{i}. {segment.strip()}"
            for i, segment in enumerate(itertools.islice(lines, 21))]
for item in numbered:
    print(item)
result_str += '\n' + '\n'.join(numbered)

# Send the digest to a WeChat Work (企业微信) group via its webhook bot.
# SECURITY NOTE: the webhook key below is a live credential embedded in
# source — move it to an environment variable or config file before
# committing/sharing this script.
url = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=d230f478-24eb-4780-a45d-0e8fe951bded'
headers = {'Content-Type': 'application/json'}

# Plain-text message payload per the WeChat Work webhook API
data = {
    "msgtype": "text",
    "text": {
        "content": result_str
    },
    "safe": 0,                        # 0 = not confidential
    "enable_id_trans": 0,             # no userid-to-name translation
    "enable_duplicate_check": 0,      # duplicate suppression disabled
    "duplicate_check_interval": 1800  # (only relevant if the check is enabled)
}
# timeout keeps the script from hanging forever on a stalled connection
response = requests.post(url, headers=headers, data=json.dumps(data), timeout=10)
response.raise_for_status()  # surface delivery failures instead of exiting silently
