import re

import requests


def get_news():
    """Fetch the top tech headlines from NetEase (tech.163.com).

    Scrapes the JSONP data feed behind the tech.163.com front page,
    strips the ``data_callback(...)`` wrapper, and pairs each headline
    with its article URL.

    Returns:
        dict: mapping of news title -> article URL (at most 18 entries);
        empty if the request or the parsing fails.
    """
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch, br',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Cookie': '_ga=GA1.2.1295241282.1555764994; _ntes_nuid=ba6c01094ad014ee67291cbd93fbf9ca; _ntes_nnid=cdb6628bbe04b0309ff818ea7c47119e,1561017743195; NNSSPID=e2f760b6a8494ad4ba99f7987b3cab06; NTES_hp_textlink1=old; ne_analysis_trace_id=1561018193650; Province=0; City=0; vinfo_n_f_l_n3=bd4ac504432134d2.1.1.1561017743224.1561019652855.1561019887629; s_n_f_l_n3=bd4ac504432134d21561019791428',
        'Host': 'tech.163.com',
        'If-Modified-Since': 'Thu, 20 Jun 2019 08:08:01 GMT',
        'Referer': 'http://tech.163.com/',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0',
    }

    # Fetch the tech-news JSONP feed.
    try:
        # timeout prevents the request from hanging forever on a dead host.
        resp = requests.get(
            'https://tech.163.com/special/00097UHL/tech_datalist.js?callback=data_callback',
            headers=headers,
            timeout=10,
        )
        resp.encoding = resp.apparent_encoding
        # Strip the leading "data_callback(" preamble (first 50 chars) and
        # the trailing ")" so only the JS-object payload remains.
        payload = resp.text[50:-2]

        titles = re.findall(r'title.*', payload)  # lines like: title:"...",
        urls = re.findall(r'docurl.*', payload)   # lines like: docurl:"...",
    except Exception as e:
        print(f"新闻信息获取失败: {e}")
        titles, urls = [], []

    # Pair each title with its URL, capped at 18 entries. zip() also guards
    # against the two regexes matching different counts (the old code
    # indexed urls by len(titles) and could raise IndexError).
    news = {}
    for title_line, url_line in zip(titles[:18], urls[:18]):
        # Slice off the 'title:"' / 'docurl:"' prefixes and the trailing '",'.
        news[title_line[8:-2]] = url_line[9:-2]
    return news

if __name__ == '__main__':
    # Manual smoke test: scrape the headlines and dump them to stdout.
    headlines = get_news()
    print(headlines)
