import requests
from bs4 import BeautifulSoup
from urllib.parse import unquote

# Session cookies captured from a logged-in browser visit to douban.com.
# They identify this client to Douban (bid, __utm*/ _pk_* analytics IDs,
# _ga/_gid Google Analytics). NOTE(review): these expire — requests will
# start getting blocked/redirected once they do; refresh from the browser.
cookies = {
    'll': '"108296"',
    'bid': '8JQPwZZT6Do',
    '_pk_ref.100001.8cb4': '%5B%22%22%2C%22%22%2C1698373591%2C%22https%3A%2F%2Fcn.bing.com%2F%22%5D',
    '_pk_id.100001.8cb4': '88afbaf26fd1b464.1698373591.',
    '_pk_ses.100001.8cb4': '1',
    '__utma': '30149280.2145246926.1698373592.1698373592.1698373592.1',
    '__utmc': '30149280',
    '__utmz': '30149280.1698373592.1.1.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/',
    '__utmt': '1',
    'ap_v': '0,6.0',
    '_ga': 'GA1.2.217034867.1698373596',
    '_gid': 'GA1.2.240010009.1698373596',
    '_ga_Y4GN1R87RG': 'GS1.1.1698373595.1.0.1698373868.0.0.0',
    '__utmb': '30149280.10.10.1698373592',
}

# Browser-like request headers (copied from Chrome 118 on Windows) so the
# scraper looks like a normal navigation request rather than a bot.
# The Cookie header is deliberately absent — cookies are passed separately
# via the `cookies` argument of requests.get().
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'Referer': 'https://www.douban.com/',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
    'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
}


# --- Search Douban for the movie and print the first matching result ---
movie_name = input('请输入电影名称：')

params = {
    'q': movie_name,
}

response = requests.get('https://www.douban.com/search', params=params, cookies=cookies, headers=headers)
# Fail fast on HTTP errors (e.g. 403 once the session cookies expire)
# instead of trying to parse an error page.
response.raise_for_status()
bs = BeautifulSoup(response.text, "html.parser")

# The first hit lives at: result-list > result > content > title.
# Guard the top of the chain: with no results (or a changed/blocked page)
# find() returns None and the chained lookups would raise AttributeError.
result_list = bs.find('div', 'result-list')
if result_list is None:
    raise SystemExit("未找到搜索结果。")
first_result = result_list.find('div', 'result').find('div', 'content')
parser_result = first_result.find('div', 'title')

# Title of the first search hit
movie_title = parser_result.find('a').text
# The anchor's href is a Douban redirect URL; the real subject link sits in
# its percent-encoded `url=` query parameter, so decode and slice it out.
movie_link = unquote(parser_result.find('h3').find('a').get('href')).split('url=')[1].split('&query=')[0]
# Rating, e.g. "8.7"
movie_score = parser_result.find('div', 'rating-info').find('span', 'rating_nums').text
# Review count — third <span> inside rating-info (e.g. "(123456人评价)")
movie_comment_number = parser_result.find('div', 'rating-info').find_all('span')[2].text
# Director / cast summary line
movie_subject = parser_result.find('div', 'rating-info').find('span', 'subject-cast').text
# Short synopsis paragraph (reuse the already-located content node rather
# than re-walking the tree from the root as before)
movie_details = first_result.find('p').text

print("查询结果如下：")
print(f"[电影]：{movie_title} - {movie_link}")
print(f"{movie_score} {movie_comment_number} {movie_subject}")
print(f"{movie_details}")

# --- Optionally page through the movie's reviews, 20 per page ---
flag = input('是否获取评论（y/n）：')

if flag == "y" or flag == "Y":
    page_flag = True

    while page_flag:

        # Non-numeric input used to crash the loop with ValueError;
        # reject it and re-prompt instead.
        try:
            number = int(input("输入查询页数(注：页数不能小于1且不能大于1000)。退出输入（0):"))
        except ValueError:
            print("输入正确页数。")
            continue

        if number == 0:
            page_flag = False
            print("已退出。")
            break

        if number < 1 or number > 1000:
            print("输入错误，请重试。")
            continue

        # (Previously there was an `if number <= 1 or number <= 1000:` guard
        # here — always true after the range check above, so it was removed.)
        print(f"查询内容如下：（第{number})页")

        # Douban paginates reviews via a `start` offset, 20 reviews per page.
        comment_params = {
            'start': (number - 1) * 20,
        }
        url = movie_link + "reviews"

        comment_response = requests.get(url, params=comment_params, cookies=cookies,
                                        headers=headers)

        comment_result = BeautifulSoup(comment_response.text, "html.parser")
        # Guard against a missing review list (blocked page / layout change)
        # so the chained find_all doesn't raise AttributeError.
        review_container = comment_result.find("div", 'review-list')
        if review_container is None:
            print("未获取到评论，请重试。")
            continue
        review_list = review_container.find_all('div', 'main review-item')

        for index, item in enumerate(review_list):
            # A review's short text lives under short-content; a leading <p>,
            # when present, holds prefix text that is stripped off below.
            short_content = item.find('div', 'main-bd').find('div', 'review-short').find('div', 'short-content')
            lead_paragraph = short_content.find('p')
            flag_content = lead_paragraph.text if lead_paragraph is not None else None

            # Normalize the snippet and re-append the "(展开)" (expand) marker.
            content = str(short_content.text).replace("(展开)", "").strip() + "(展开)"
            if flag_content is not None:
                # Drop everything up to and including the lead paragraph.
                content = content.split(flag_content)[1].lstrip()

            print(f"{index+1} {content} \n")

else:
    print("结束。")