import requests
from lxml import etree
from fake_useragent import UserAgent
import time
import random
from http.cookiejar import MozillaCookieJar
from urllib.request import HTTPCookieProcessor, build_opener, Request
from urllib.parse import urlencode


def getmovieId():
    """Prompt repeatedly until the user types a valid integer movie id.

    Returns:
        int: the Douban movie subject id entered by the user.
    """
    while True:
        movieId = input("请输入您的目标电影编号：")
        # input() always returns a str, never None — empty string is the
        # only "no answer" case worth checking.
        if not movieId:
            print("您输入的电影编号不正确，请重新输入\n")
            continue
        try:
            movieId = int(movieId)
        except ValueError:
            # Non-numeric input; ask again.
            print("转换失败，编号输入错误！")
            continue
        print("getMovieId执行完成！")
        return movieId


def checkNone(text):
    """Count the review entries present on one listing page.

    Parses *text* as HTML and returns how many <div> children sit under
    the review-list container; 0 signals that pagination has run out.
    """
    tree = etree.HTML(str(text))
    entries = tree.xpath('//div[@class="review-list  "]/div')
    print("checkNone执行完成！")
    return len(entries)


def getCookie():
    """Log in to Douban and persist the session cookies to cookie.txt.

    POSTs the (hard-coded) credentials to the login endpoint; the cookie
    handler captures the Set-Cookie headers, which are then saved in
    Mozilla format so getComment() can reload them.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"
    }
    # NOTE(review): credentials are hard-coded in source — move them to an
    # environment variable or config file before sharing this script.
    data_form = {
        "name": 17856416925,
        "password": "qipin521",
        "remember": False
    }
    login_url = "http://www.douban.com/stat.html?&action=login_click&platform=douban&login_click_time=1577018347043&callback=jsonp_oaxs3t2ytgdnucw"
    f_data = urlencode(data_form)
    request = Request(login_url, headers=headers, data=f_data.encode())
    cookie = MozillaCookieJar("cookie.txt")
    c_handler = HTTPCookieProcessor(cookie)
    opener = build_opener(c_handler)
    # Close the response explicitly (the original leaked the socket); the
    # body is unused — cookies are captured by the handler during open().
    with opener.open(request):
        pass
    cookie.save(ignore_expires=True, ignore_discard=True)
    print("getCookie执行完成")


def getComment(movieId):
    """Crawl the movie's review pages and append review texts to ciyun.txt.

    Walks Douban's paginated review listing (20 reviews per page), follows
    each review link, extracts the paragraph texts and appends them to the
    output file, sleeping a random interval between requests.

    Args:
        movieId (int): Douban movie subject id, e.g. from getmovieId().
    """
    commentNum = 0
    headers = {
        "User-Agent": UserAgent().random
    }
    cookie = MozillaCookieJar()
    cookie.load("cookie.txt", ignore_discard=True, ignore_expires=True)
    c_handler = HTTPCookieProcessor(cookie)
    opener = build_opener(c_handler)
    for i in range(20000):
        count = i * 20  # listing offset: 20 reviews per page
        search_url = "https://movie.douban.com/subject/{}/reviews?start={}".format(movieId, count)
        print("遍历网址：" + search_url)
        request = Request(search_url, headers=headers)
        time.sleep(random.random() * 5)
        # Close the response promptly — the original leaked one socket per
        # page; decode() already yields str, no extra str() needed.
        with opener.open(request) as response:
            text = response.read().decode()
        if checkNone(text) == 0:
            break  # empty review list: we are past the last page
        page = etree.HTML(text)
        # Renamed from `list` to avoid shadowing the builtin.
        review_urls = page.xpath('//div[@class="main-bd"]/h2/a/@href')
        for url in review_urls:
            # 每爬取一个评论都往文档里添加一下
            commentResponse = requests.get(url, headers=headers)
            print("评论网址：" + url)
            time.sleep(random.random() * 3)
            commentE = etree.HTML(commentResponse.text)
            commentlist = commentE.xpath('//div[@class="review-content clearfix"]/p/text()')
            comment = "".join(commentlist) + "\n\n\n\n\n\n"
            # Round-trip through GBK to drop characters GBK cannot encode.
            # NOTE(review): open() uses the platform default encoding, which
            # may still reject some characters — consider encoding="gbk".
            with open("ciyun.txt", "a+") as f:
                f.write(comment.encode("gbk", 'ignore').decode("gbk", "ignore"))
                commentNum += 1
                print("添加了" + str(commentNum) + "个评论")


def main():
    """Entry point: ask for a movie id, refresh cookies, then crawl reviews."""
    target = getmovieId()
    getCookie()
    getComment(target)


if __name__ == '__main__':
    main()
