import requests as rq
from bs4 import BeautifulSoup
import csv
# Module-level accumulator: gethtmldata() appends one [date, title, content]
# row per table row here, and writedatatocsv() writes it out.
alllater=[]
def getHTML(url):
    """Fetch *url* and return its body decoded as UTF-8 text.

    On any request failure (connection error, timeout, non-2xx status)
    prints a notice and returns ' ' (a single space) so callers can
    still hand the result to BeautifulSoup without crashing.
    """
    try:
        # Local renamed from `re` to avoid shadowing the stdlib re module.
        resp = rq.get(url, timeout=30)
        resp.raise_for_status()  # raise HTTPError for 4xx/5xx responses
        resp.encoding = 'utf-8'  # page declares GBK-less content; force UTF-8
        return resp.text
    except rq.RequestException:  # narrow: only requests' own failures
        print('有异常')
        return ' '

def gethtmldata(soup):
    """Extract table rows from the page's #vsb_content div.

    Each <tr> is appended to the module-level ``alllater`` list as a
    3-item row.  Rows with fewer than 3 <td> cells are assumed to be
    missing the date column and get '' in the first slot.
    NOTE(review): .string is None for cells with nested markup — the
    original behaved the same way; verify downstream handling.
    """
    trs = soup.find('div', id='vsb_content').find_all('tr')
    for tr in trs:
        tds = tr.find_all('td')
        ps = tr.find_all('p')
        # Guard: header/malformed rows may carry fewer <p> tags than we
        # index below; the original raised IndexError on them.
        if len(ps) < 2:
            continue
        if len(tds) < 3:
            alllater.append(['', ps[0].string, ps[1].string])
        elif len(ps) >= 3:
            alllater.append([ps[0].string, ps[1].string, ps[2].string])

def writedatatocsv(alllater):
    """Write the collected rows to data2.csv (UTF-8).

    Each row contributes exactly its first three fields, matching the
    [date, title, content] layout built by gethtmldata().
    """
    # newline='' is required by the csv module: without it, csv.writer's
    # own '\r\n' terminators get translated again on Windows, producing
    # blank lines between records.
    with open('data2.csv', 'w', encoding='utf-8', newline='') as file:
        writer = csv.writer(file)
        # writer.writerow(["日期","标题","内容"])
        for row in alllater:
            writer.writerow(row[:3])



def main():
    """Scrape the course-listing page and dump its table to data2.csv."""
    url = 'http://www.zjitc.net/rcpy/jpkc.htm'
    html = getHTML(url)
    soup = BeautifulSoup(html, "html.parser")
    gethtmldata(soup)
    writedatatocsv(alllater)


# Guard the entry point so importing this module does not trigger a
# network fetch as a side effect.
if __name__ == "__main__":
    main()