import requests
from bs4 import BeautifulSoup
import xlwt
import datetime

def request_juchao(url, headers, timeout=10):
    """Fetch *url* and return the response body as text.

    Args:
        url: Target URL.
        headers: HTTP headers to send (e.g. a User-Agent dict).
        timeout: Seconds to wait before giving up; prevents the
            request from hanging indefinitely (default 10).

    Returns:
        The response text on HTTP 200, otherwise ``None`` (including
        on any network error).
    """
    try:
        response = requests.get(url=url, headers=headers, timeout=timeout)
        if response.status_code == 200:
            return response.text
        # Non-200 responses are treated as failure, same as a network error.
        return None
    except requests.RequestException:
        return None

# Module-level spreadsheet state shared with save_to_excel().
book = xlwt.Workbook(encoding='utf-8', style_compression=0)
sheetName = '巨潮'
sheet = book.add_sheet(sheetName, cell_overwrite_ok=True)

# Header row (row 0): one title per column.
for col, title in enumerate(('代码', '简称', '公告标题', '日期')):
    sheet.write(0, col, title)

# Next worksheet row to fill; advanced by save_to_excel().
n = 1
def save_to_excel(soup):
    """Extract announcement rows from the parsed page and append them to
    the module-level worksheet.

    Reads the first element with class ``table jc-table`` and writes one
    worksheet row per table row (code, short name, title, date), starting
    at the module-level row counter ``n`` and advancing it.

    Args:
        soup: BeautifulSoup document for the announcements page.
    """
    global n
    table = soup.find(class_='table jc-table')
    if table is None:
        # Page layout changed or the request returned an error page;
        # nothing to extract.
        return
    for row in table.find_all('tr'):
        tds = row.find_all('td')
        # Skip header/malformed rows: we index four cells below, so a
        # row with fewer than 4 <td> cells would raise IndexError.
        if len(tds) < 4:
            continue
        dm = tds[0].text.replace('\n', '')
        jc = tds[1].text.replace('\n', '')
        ggbt = tds[2].text.replace('\n', '')
        rq = tds[3].text.replace('\n', '')
        print('公告：' + dm + ' | ' + jc + ' | ' + ggbt + '|' + rq)
        sheet.write(n, 0, dm)
        sheet.write(n, 1, jc)
        sheet.write(n, 2, ggbt)
        sheet.write(n, 3, rq)
        n = n + 1

def main():
    """Fetch the cninfo index page and append its announcements to the
    module-level worksheet.

    Aborts with a message if the page could not be fetched, instead of
    passing ``None`` to BeautifulSoup (which would raise a confusing
    TypeError).
    """
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36"}
    url = 'http://www.cninfo.com.cn/new/index'
    html = request_juchao(url, head)
    if html is None:
        # request_juchao returns None on network errors / non-200 status.
        print('Failed to fetch ' + url)
        return
    soup = BeautifulSoup(html, 'lxml')
    save_to_excel(soup)


if __name__ == '__main__':
    main()
    # xlwt can only produce legacy binary .xls files; saving them with an
    # .xlsx extension makes Excel refuse the file ("format and extension
    # don't match"), so use .xls. The save also belongs inside this guard
    # so merely importing the module does not write a file to disk.
    book.save(u'巨潮.xls')