# -*- coding: utf-8 -*-
"""
Created on Tue Sep 26 15:28:31 2017

@author: deanchen
"""

import urllib
from urllib import *
from bs4 import BeautifulSoup as bs
import re
import jieba #分词包
import pandas as pd
import numpy #numpy计算包
import codecs
import matplotlib.pyplot as plt
# Enable inline plotting when running inside IPython/Jupyter.
# get_ipython() returns None outside an interactive IPython shell, in which
# case the original unguarded .run_line_magic() call raised AttributeError;
# guard so the script also runs as a plain `python script.py`.
from IPython import get_ipython
_ipy = get_ipython()
if _ipy is not None:
    _ipy.run_line_magic('matplotlib', 'inline')
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)  # default figure size in inches
from wordcloud import WordCloud

# Parse the Douban "now playing" page for Beijing.
def getNowPlayingMovie_list():
    """Return a list of {'id': ..., 'name': ...} dicts for now-playing movies.

    Scrapes https://movie.douban.com/cinema/nowplaying/beijing/ and reads the
    <li class="list-item"> entries inside the div#nowplaying section.
    NOTE: `urllib.urlopen` is the Python 2 API (urllib.request.urlopen in Py3).
    Returns an empty list if the expected page structure is not found.
    """
    resp = urllib.urlopen('https://movie.douban.com/cinema/nowplaying/beijing/')
    try:
        html_data = resp.read().decode('utf-8')
    finally:
        resp.close()  # don't leak the connection
    soup = bs(html_data, 'html.parser')
    nowplaying_movie = soup.find_all('div', id='nowplaying')
    if not nowplaying_movie:
        # Page layout changed or the request was blocked; fail soft.
        return []
    nowplaying_movie_list = nowplaying_movie[0].find_all('li', class_='list-item')

    nowplaying_list = []
    for item in nowplaying_movie_list:
        # BUGFIX: the original created ONE dict per <li> and appended it once
        # per <img>, so several <img> tags produced duplicate references to the
        # same dict with 'name' overwritten. Build a fresh dict per <img>.
        for tag_img_item in item.find_all('img'):
            nowplaying_list.append({
                'id': item['data-subject'],
                'name': tag_img_item['alt'],
            })
    # Print the movie names (print() parses on both Python 2 and 3).
    for item in nowplaying_list:
        print(item['name'])
    return nowplaying_list

# Scrape one page (20 entries) of short comments for the given movie id.
def getCommentsByID(movieID, pageNum):
    """Return a list of comment text strings for *movieID*, page *pageNum*.

    *pageNum* is 1-based; each page holds 20 comments. For pageNum <= 0 an
    empty list is returned. (BUGFIX: the original returned False, which made
    main()'s `for item in commentList_temp:` loop raise TypeError.)
    NOTE: `urllib.urlopen` is the Python 2 API (urllib.request.urlopen in Py3).
    """
    eachCommentList = []
    if pageNum <= 0:
        # Keep the return type iterable for callers.
        return eachCommentList
    start = (pageNum - 1) * 20

    # Build the comments URL with the 20-per-page offset.
    requrl = ('https://movie.douban.com/subject/' + movieID +
              '/comments' + '?' + 'start=' + str(start) + '&limit=20')
    resp = urllib.urlopen(requrl)
    try:
        html_data = resp.read().decode('utf-8')
    finally:
        resp.close()  # don't leak the connection
    soup = bs(html_data, 'html.parser')
    comment_div_list = soup.find_all('div', class_='comment')

    # Take the text of each comment's first <p>. `.string` is None when the
    # tag contains child elements, so those entries are skipped.
    for item in comment_div_list:
        comment_text = item.find_all('p')[0].string
        if comment_text is not None:
            eachCommentList.append(comment_text)
    return eachCommentList
    
def main():
    """Scrape 10 pages of comments for the first now-playing movie and render
    a word cloud of the most frequent (stopword-filtered) Chinese words."""
    # Collect pages 1..10 of comments; index 0 selects which movie in the
    # now-playing list to analyse.
    commentList = []
    NowPlayingMovie_list = getNowPlayingMovie_list()
    for i in range(10):
        commentList.extend(getCommentsByID(NowPlayingMovie_list[0]['id'], i + 1))

    # Join all comments into one text blob. The items are already unicode
    # text, so a plain join suffices — and avoids the quadratic cost of the
    # original `comments = comments + ...` loop.
    comments = ''.join(item.strip() for item in commentList)

    # Keep only CJK characters, dropping punctuation/digits/latin/whitespace.
    # (u'\uXXXX' escapes are equivalent to the original ur'' literal in Py2,
    # and unlike ur'' this also parses under Python 3.)
    pattern = re.compile(u'[\u4e00-\u9fa5]+')
    filterdata = re.findall(pattern, comments)
    cleaned_comments = ''.join(filterdata)

    # Tokenize with jieba and put the tokens in a one-column DataFrame.
    segment = jieba.lcut(cleaned_comments)
    words_df = pd.DataFrame({'segment': segment})

    # Filter out stopwords; stopwords.txt sits next to the script, one word
    # per line.
    stopwords = pd.read_csv("stopwords.txt", index_col=False, quoting=3,
                            sep="\t", names=['stopword'], encoding='utf-8')
    words_df = words_df[~words_df.segment.isin(stopwords.stopword)]

    # Word-frequency table, most frequent first. groupby().size() replaces the
    # dict-renaming .agg({"计算": numpy.size}) form, which was deprecated and
    # then removed in modern pandas.
    words_stat = words_df.groupby('segment').size().reset_index(name="计算")
    words_stat = words_stat.sort_values(by=["计算"], ascending=False)

    # Render the 1000 most frequent words (SimHei font supplies CJK glyphs).
    wordcloud = WordCloud(font_path="simhei.ttf", background_color="white",
                          max_font_size=80)
    word_frequence = {x[0]: x[1] for x in words_stat.head(1000).values}
    wordcloud = wordcloud.fit_words(word_frequence)

    plt.imshow(wordcloud)
    plt.show()

# Run the scrape only when executed as a script, not when imported.
if __name__ == "__main__":
    main()