# coding: utf-8
import requests
from bs4 import BeautifulSoup
import time
import os
import csv
# ----------------------------------------------------- config ---------------------------------------------------------
# Page to scrape.
URL = 'http://localhost:8084/weibo/comment.html'
# CSS selector for the comment text: an <h3> inside .m-text-box within each card.
COMMENT_SELECTOR = 'div .m-text-box > h3'


def fetch_html(url):
    """Download *url* and return the decoded page text.

    Returns None when the server answers with anything other than HTTP 200,
    so callers can bail out instead of parsing a missing page.  A timeout is
    set so a dead server cannot hang the script forever.
    """
    response = requests.get(url, timeout=10)
    # The page is served as UTF-8; force the decoding rather than trusting
    # requests' charset guess.
    response.encoding = 'utf-8'
    if response.status_code == 200:
        return response.text
    return None


def parse_comments(html):
    """Yield the text of each comment found in *html*.

    Comments live in <div class="card-wrap"> cards; within each card the
    text sits in the element matched by COMMENT_SELECTOR.  Yields whatever
    ``.string`` gives (may be None for elements with nested markup).
    """
    soup = BeautifulSoup(html, 'html.parser')
    for card in soup.find_all('div', class_='card-wrap'):
        for node in card.select(COMMENT_SELECTOR):
            yield node.string


def main():
    """Fetch the comment page and append the comments to today's CSV file."""
    # Output file: <cwd>/YYYY-MM-DD-Comment.csv
    today = time.strftime('%Y-%m-%d')
    path = os.path.join(os.getcwd(), today + '-Comment.csv')

    html = fetch_html(URL)
    if html is None:
        # Download failed — do not create/append to the CSV at all.
        # (The original wrote the header first and then crashed with a
        # NameError on `html`; now we exit cleanly.)
        return

    # newline='' is required by the csv module to avoid blank rows on
    # Windows; the `with` block guarantees the file is closed even if
    # parsing raises.
    with open(path, 'a', encoding='utf-8', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(('评论内容',))
        for comment in parse_comments(html):
            print(comment)
            writer.writerow((comment,))


if __name__ == '__main__':
    main()