import pymongo
import csv
import random
import ssl
import time
from urllib import request
from bs4 import BeautifulSoup
def getComment(url):
    """Download the raw HTML of a Douban comment page.

    Rotates randomly between two browser User-Agent headers to reduce the
    chance of being blocked, disables SSL certificate verification, and
    returns the decoded page source.

    :param url: full URL of the comment page to fetch
    :return: page source as a UTF-8 decoded string
    :raises urllib.error.URLError: on network failure
    """
    # Firefox / Chrome / Safari style request header
    header1 = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
        "Host": "movie.douban.com"
    }
    # IE style request header
    header2 = {
        "User-Agent": "Mozilla / 5.0(Windows NT 10.0;WOW64;Trident / 7.0;rv: 11.0) like Gecko",
        "Host": "movie.douban.com"
    }
    # Pick one of the headers at random for this request.
    realHeader = random.choice([header1, header2])
    req = request.Request(url, headers=realHeader)
    # Unverified context works around SSL certificate errors on HTTPS.
    # NOTE(review): this disables certificate checking entirely.
    context = ssl._create_unverified_context()
    # Context manager guarantees the connection is closed
    # (the original version leaked the response object).
    with request.urlopen(req, context=context) as response:
        return response.read().decode("utf-8")
def read_data(contents):
    """Parse a Douban comment page and extract every comment on it.

    :param contents: HTML source of the page (string)
    :return: list of dicts with keys 'username', 'star', 'comment';
             an empty list when the comment container is absent
             (page layout changed or the request was blocked)
    """
    soup = BeautifulSoup(contents, 'lxml')
    # Outer container holding all comment items.
    comments_div = soup.select_one('#comments')
    if comments_div is None:
        # Nothing to parse -- fail soft instead of raising AttributeError.
        return []
    comment_dict_list = []
    # One '.comment-item' element per comment.
    for comment in comments_div.select('.comment-item'):
        comment_div = comment.find('div', attrs={'class': 'comment'})
        comment_info = comment_div.find('span', attrs={'class': 'comment-info'})
        # Commenting user's display name.
        username = comment_info.find('a').text
        # The star rating span carries class "rating". Indexing spans by
        # position (the original code) picked up the timestamp span for
        # comments that have no rating; look it up by class instead and
        # fall back to None when the comment is unrated.
        rating_span = comment_info.find('span', attrs={'class': 'rating'})
        star = rating_span.get('title') if rating_span is not None else None
        # Short comment text body.
        comment_text = comment_div.find('span', attrs={'class': 'short'}).text
        comment_dict_list.append(
            {'username': username, 'star': star, 'comment': comment_text})
    return comment_dict_list
def get_collection():
    """Return the MongoDB collection used to store scraped comments.

    Connects to a MongoDB server on localhost and returns the 'movies'
    collection of the 'douban' database.

    :return: pymongo collection object
    """
    # Bug fix: only `pymongo` is imported at module level, so the bare
    # name `MongoClient` raised NameError -- qualify it with the module.
    client = pymongo.MongoClient('mongodb://localhost:27017/')
    db = client['douban']
    collection = db.movies
    return collection
# Persist the scraped comments to MongoDB.
# NOTE(review): despite the "csv" in the name, this function has always
# written to MongoDB; the name is kept for caller compatibility.
def save_comment_csv(comment_dict_list, collection):
    """Insert all comment dicts into the given MongoDB collection.

    :param comment_dict_list: list of comment dicts to store (may be empty)
    :param collection: pymongo collection object
    """
    if comment_dict_list:
        # insert_many replaces the long-deprecated Collection.insert and
        # sends one batched request instead of one round-trip per document.
        # The empty-list guard matters: insert_many raises on [].
        collection.insert_many(comment_dict_list)
if __name__ == '__main__':
    # Scrape the first 20 pages of comments (20 comments per page) and
    # persist each page to MongoDB before moving on to the next one.
    collection = get_collection()
    for page in range(20):
        # Pagination offset: 0, 20, 40, ... 380
        offset = str(page * 20)
        url = ('https://movie.douban.com/subject/34841067/comments?start='
               + offset + '&limit=20&sort=new_score&status=P')
        # Download the page, parse it, then store the parsed comments.
        contents = getComment(url)
        comment_dict_list = read_data(contents)
        save_comment_csv(comment_dict_list, collection)
        print('保存成功第' + str(page + 1) + '页内容')
        # Throttle requests to avoid being rate-limited by the site.
        time.sleep(5)