'''

1、使用requests爬取网页
2、使用BeautifulSoup实现数据解析
3、使用pandas将数据写到excel中
'''

import time
import requests
from bs4 import BeautifulSoup
import pprint
import json
import pandas as pd

class HttpCodeException(Exception):
    """Raised by get_html when an HTTP request returns a non-200 status code."""
    pass

def get_html(url):
    """Fetch *url* and return the response body as text.

    Sleeps one second after each request to throttle the crawl.

    :param url: page URL to download
    :return: response body (str)
    :raises HttpCodeException: if the HTTP status code is not 200
    """
    print(f"get_html url:[{url}]")
    res = requests.get(url)
    time.sleep(1)  # be polite to the server: throttle request rate
    print(res.status_code)
    if res.status_code != 200:
        # include URL and status code so failures are diagnosable from logs
        raise HttpCodeException(f"GET {url} returned status {res.status_code}")

    return res.text

def produce_url():
    """Generate the URLs of the 10 Top-250 list pages (25 movies each).

    :return: list of the 10 page URLs
    """
    url_style = "https://movie.douban.com/top250?start={index}&filter="
    url_list = []

    # Offsets 0, 25, ..., 225 cover all 10 pages. The original
    # range(0, 25, 25) produced only the first page, contradicting
    # the documented 10 pages.
    for i in range(0, 250, 25):
        url = url_style.format(index=i)
        print(url)
        url_list.append(url)
    return url_list

def get_moive_info_url(page_url):
    """Collect the 25 per-movie detail-page URLs from one list page.

    :param page_url: URL of one Top-250 list page
    :return: list of movie detail-page URLs
    :raises HttpCodeException: propagated from get_html on a non-200 status
    """
    html = get_html(page_url)
    # Name the parser explicitly: BeautifulSoup(html) guesses one, which
    # emits GuessedAtParserWarning and can differ between environments.
    soup = BeautifulSoup(html, 'html.parser')

    moive_url_list = []
    # Each movie entry lives in a <div class="pic"> inside the grid <ol>;
    # its first <a> links to the detail page.
    ol_node = soup.find('ol', class_='grid_view')
    pic_nodes = ol_node.find_all('div', class_='pic')
    for pic_node in pic_nodes:
        a = pic_node.find('a')
        href = a['href']
        moive_url_list.append(href)

    return moive_url_list

def get_moive_info(html):
    """
    Parse one movie detail page into a dict of movie attributes.

    :param html: detail-page HTML text
    :return: dict with keys director, actor, datePublished, genre,
             ratingCount, ratingValue, area
    :raises json.JSONDecodeError: if the embedded JSON-LD is malformed
    """
    # Explicit parser avoids GuessedAtParserWarning and keeps parsing
    # consistent across environments.
    soup = BeautifulSoup(html, 'html.parser')

    # The production country/region is plain text inside the "info" div
    # (it is absent from the embedded JSON), so scan for its label.
    area = ""
    info_div = soup.find('div', attrs={'id': 'info'})
    for child in info_div.children:
        if child.string and child.string.startswith('制片国家/地区'):
            area = child.next_sibling.string.strip()
            break  # the label occurs once on the page; stop scanning

    # Structured movie data is embedded as JSON-LD; strip raw CR/LF that
    # would otherwise break json.loads.
    moive_info_script = soup.find('script', attrs={'type': 'application/ld+json'})
    moive_info_text = moive_info_script.text.replace('\r', '').replace('\n', '')
    json_data = json.loads(moive_info_text)

    info = {}
    info['director'] = json_data['director']            # director(s)
    info['actor'] = json_data['actor']                  # leading actors
    info['datePublished'] = json_data['datePublished']  # release date
    info['genre'] = json_data['genre']                  # genres
    info['ratingCount'] = json_data['aggregateRating']['ratingCount']  # number of ratings
    info['ratingValue'] = json_data['aggregateRating']['ratingValue']  # rating score
    info['area'] = area                                 # production country/region

    return info

def get_moive_info_by_url(url):
    """
    Download and parse one movie detail page (director, actors, release
    date, genres, rating, area).

    :param url: movie detail-page URL
    :return: info dict on success; on any failure, the URL itself (str)
             so the caller can log or retry it
    """
    try:
        html = get_html(url)
        info = get_moive_info(html)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any crawl/parse error falls back to the URL.
        return url
    return info


def run_single_thread():
    """
    Crawl all Top-250 movies sequentially and write one JSON record per
    line to the file 'moive_data'. Prints failed URLs and total runtime.

    :return: None
    """
    t1 = time.time()
    moive_url_list = []
    page_url_list = produce_url()

    for page_url in page_url_list:
        url_list = get_moive_info_url(page_url)
        moive_url_list.extend(url_list)

    # `with` guarantees the file is closed even if a write raises;
    # an explicit encoding makes the output portable across platforms.
    with open('moive_data', 'w', encoding='utf-8') as res_file:
        for url in moive_url_list:
            info = get_moive_info_by_url(url)
            if isinstance(info, str):
                # get_moive_info_by_url returns the URL itself on failure
                print(info)
            else:
                res_file.write(json.dumps(info) + "\n")

    t2 = time.time()
    print("耗时:" + str(t2 - t1))

# if __name__ == "__main__":
    # run_single_thread()
    # list = [{'rank':'1','title':'肖申克的救赎','comments':'192357'}, {'rank':'2','title':'阿甘正传','comments':'284876'}]
    #
    # for value in list:
    #     print(value)
    #
    # df = pd.DataFrame(list)
    # df.to_excel("豆瓣电影TOP250.xlsx")


# url = "https://so.gushiwen.cn/guwen/"
# response = requests.get(url)
# print(response.status_code)
# print(response.headers)
# print(response.encoding)
# response.encoding = "UTF-8"
# print(response.content)

# Demo: CPython interns short string literals, so two identical literals
# usually refer to the same object — `is` prints True here.
a = '123'
b = '123'
print(a is b)

# Demo: dict.update merges in place — the overlapping key 'one' takes
# dict2's value, and the new key 'tmp' is appended at the end.
dict1 = {'one': 1, 'two': 2, 'three': 3}
dict2 = {'one': 4, 'tmp': 5}
dict1.update(dict2)
print(dict1)

def changeList(nums):
    """Append 'c' to *nums* in place and print it (shows that a list
    argument is a reference: the caller sees the mutation)."""
    nums.append('c')
    print(f"nums: {nums}")

# The callee appends to the same list object, so str1 gains the 'c'.
str1 = ['a', 'b']
changeList(str1)
print(f"str1: {str1}")

# Demo: print 0 through 4. (The original's `if i == 2: pass` branch was
# a no-op, so dropping it leaves the output unchanged.)
for i in range(5):
    print(i)

# Demo: remove() deletes only the FIRST matching value.
lists = [1, 1, 2, 3, 4, 5, 6]
lists.remove(1)
print(lists)

# append() adds its argument as a single element, so the list ends up
# holding a nested list and a dict as its last two entries.
lists.append([7, 8, 9])
dicts = {1: "hello", 2: "world"}
lists.append(dicts)
print(lists)

# Print each element with its position (same output as an index-driven
# while loop over the list).
for pos, item in enumerate(lists):
    print(f"lists[{pos}]={item}")

# Indexing past the scalar entries reaches the nested structures.
newList = lists[6]
print(newList)

newDict = lists[7]
# Iterating a dict yields its keys.
for key in newDict:
    print(key)


from utils import *
