# 豆瓣影评 (Douban movie reviews) — scrape short comments for one film,
# save them to a CSV file, and render a word cloud of the rated comments.
# Environment: requires requests, bs4, jieba, and pyecharts (WordCloud).


def getComment(i):
    """Fetch one page (20 items) of Douban short comments and append each as
    a dict (``author``/``score``/``comment_txt``) to the module-level
    ``comment_list``.

    :param i: zero-based page index; the request starts at item ``i * 20``.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
        "Host": "movie.douban.com",
    }

    # %d formats the paging offset into the query string.
    url = ("https://movie.douban.com/subject/26322774/comments"
           "?start=%d&limit=20&sort=new_score&status=P" % (i * 20))
    resp = requests.get(url=url, headers=headers)
    print("code-", resp.status_code, resp.url)

    soup = BeautifulSoup(resp.text, "html.parser")
    container = soup.find(name="div", attrs={"id": "comments"})
    for item in container.find_all(name="div", attrs={"class": "comment-item"}):
        author, score, comment_txt = "", "", ""
        comment = item.find(name="div", attrs={"class": "comment"})
        comment_txt = comment.find(name="span", attrs={"class": "short"}).text.strip()
        comment_info = item.find(name="span", attrs={"class": "comment-info"})
        author = comment_info.find(name="a").text.strip()
        # A reviewer may skip the star rating; then the "rating" span is
        # absent and find() returns None. Only this lookup is guarded so a
        # real parse failure elsewhere is not silently swallowed.
        try:
            score = comment_info.find(
                name="span", attrs={"class": "rating"}
            ).attrs.get("title").strip()
        except AttributeError:
            print("没有打分")

        comment_list.append(
            {"author": author, "score": score, "comment_txt": comment_txt}
        )

def saveCsv(comments=None, path="逐梦演艺圈.csv"):
    """Write scraped comments to a CSV file, one row per comment
    (author, score, comment text).

    :param comments: iterable of dicts with ``author``/``score``/``comment_txt``
        keys; defaults to the module-level ``comment_list``.
    :param path: output file path. utf-8-sig writes a BOM so Excel detects
        the encoding correctly.
    """
    if comments is None:
        comments = comment_list
    # "with" guarantees the file is flushed and closed even if writing fails.
    with open(path, "w", newline="", encoding="utf-8-sig") as file:
        writer = csv.writer(file)
        writer.writerows(
            [c["author"], c["score"], c["comment_txt"]] for c in comments
        )
    print("保存成功", comments)

def readCsv(path="逐梦演艺圈.csv"):
    """Reload comments from the CSV written by ``saveCsv`` into the
    module-level ``comment_list``, keeping only the comment TEXT of rows
    that carry a valid star rating.

    :param path: CSV file to read; defaults to the scraper's output file.
    """
    # The five rating labels Douban uses, worst to best.
    stars = ("很差", "较差", "还行", "推荐", "力荐")
    # "with" closes the file on all paths (the original leaked the handle).
    with open(path, "r", encoding="utf-8-sig") as file:
        # Drop previously held raw scrape data before refilling.
        comment_list.clear()
        # Column layout matches saveCsv: author, score, comment text.
        for row in csv.reader(file):
            if row[1] in stars:
                comment_list.append(row[2])


def generateWordCloud():
    """Build a word-cloud HTML page from the rated comments on disk.

    Reads the CSV via ``readCsv`` (which fills ``comment_list`` with the
    comment texts), segments the Chinese text with jieba, counts word
    frequencies, and renders a pyecharts word cloud to
    "逐梦演艺圈词云.html".
    """
    readCsv()
    # str.join builds the text in O(n); the original "+=" loop is quadratic.
    all_comment = "".join(comment_list)

    # 1. Segment the combined text into words.
    words = jieba.cut(all_comment)

    # 2. Tally word frequencies.
    counts = Counter(words)
    print(counts)

    # 3. Convert the mapping into the [word, weight] pairs pyecharts expects.
    word_list = [[word, freq] for word, freq in counts.items()]
    print(word_list)

    # 4. Render the word cloud chart to an HTML file.
    word_cloud = (
        WordCloud()
            .add(series_name="热点分析", data_pair=word_list, word_size_range=[6, 66])
            .set_global_opts(
            title_opts=options.TitleOpts(
                title="逐梦演艺圈词云",
                title_textstyle_opts=options.TextStyleOpts(font_size=23)
            ),
            tooltip_opts=options.TooltipOpts(is_show=True),
        )
    )
    word_cloud.render("逐梦演艺圈词云.html")

# Imports and shared state live at module scope (not under the __main__
# guard) so the helper functions above also work when this file is imported.
# stdlib
import csv                               # create and read the CSV file
import random
import time
from collections import Counter         # word-frequency tally

# third-party
import requests
from bs4 import BeautifulSoup            # HTML parsing
import jieba                             # Chinese word segmentation
from pyecharts import options
from pyecharts.charts import WordCloud   # word-cloud chart

# Shared scratch list: getComment fills it with dicts, readCsv replaces its
# contents with plain comment strings.
comment_list = []

if __name__ == '__main__':
    # Scrape 10 pages of comments, sleeping 1-4 s between requests to be
    # polite to the server and reduce the chance of being rate limited.
    for page in range(10):
        time.sleep(random.random() * 3 + 1)
        getComment(page)

    saveCsv()            # persist the scraped comments to CSV
    generateWordCloud()  # build the word-cloud HTML from that CSV
