import requests
from bs4 import BeautifulSoup
import csv
def get_html(url):
    """Fetch *url* and return the decoded response body as text.

    Sends a desktop-Chrome User-Agent so the target site serves the normal
    page. Returns None (after printing the error) on any network or HTTP
    failure.

    :param url: absolute URL to fetch
    :return: response body as ``str``, or ``None`` on failure
    """
    head = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"}
    try:
        # timeout prevents the request from hanging forever on a dead host
        r = requests.get(url=url, headers=head, timeout=10)
        # check the HTTP status before touching the body
        r.raise_for_status()
        # apparent_encoding guesses the real charset from the content,
        # which is more reliable than the (often missing) header value
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # narrow catch: only network/HTTP errors, not programming bugs
        print(e)
        return None
def parser(html):
    """Extract news headlines and links from the page HTML.

    Looks for list items under ``#pane-news ul>li`` and, for each item that
    contains a link, collects its text and href with all spaces and newlines
    stripped out.

    :param html: page HTML as a string
    :return: list of ``[title, href]`` pairs; items without a link are skipped
    """
    soup = BeautifulSoup(html, "lxml")
    out_list = []
    for row in soup.select("#pane-news ul>li"):
        # select_one avoids the double lookup and the IndexError the
        # original raised on a <li> that has no <a> child
        link = row.select_one("a")
        if link is None:
            continue
        out_list.append([
            link.text.replace(' ', '').replace('\n', ''),
            # .get() tolerates an anchor without an href attribute
            link.get('href', '').replace(' ', '').replace('\n', ''),
        ])
    return out_list
def save_csv(path, content):
    """Append the given rows to a CSV file.

    The file is created when missing; rows already in the file are kept
    (append mode). Written as UTF-8 with ``newline=''`` so the csv module
    controls line endings.

    :param path: destination file path
    :param content: iterable of rows, each row an iterable of field values
    """
    with open(path, "a+", newline='', encoding="utf-8") as csv_file:
        csv.writer(csv_file).writerows(content)
if __name__ == '__main__':
    # Scrape the practice news page and dump [title, href] rows to a CSV.
    url = 'http://www.bspider.top/baidunews'
    html = get_html(url)
    # get_html returns None on network/HTTP failure; the original passed
    # that None straight into the parser and crashed with a TypeError
    if html is not None:
        out_list = parser(html)
        path = 'd:\\赵华超.csv'
        save_csv(path, out_list)
    else:
        print('fetch failed, nothing saved')