import json
import os
import re
from pprint import pprint

import requests


# Fetch a raw resource (bytes) from a URL.
# NOTE(review): the name keeps the original "resouce" typo because
# save_image() calls it under this name.
def get_resouce(url):
    """Download *url* and return the response body as bytes.

    Returns None when the server answers with any non-200 status.
    """
    # Browser-like User-Agent so the server does not reject the script.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        return None
    return response.content


# Save a cover image
def save_image(url, titles):
    """Download the image at *url* and store it as ./image/<titles>.

    Silently skips the write when the download fails — the original
    called f.write(None) and crashed with a TypeError in that case.
    """
    image_content = get_resouce(url)
    if image_content is None:
        # Download failed (non-200 status); nothing to write.
        return
    # The original also crashed with FileNotFoundError when ./image was
    # missing; create it up front.
    os.makedirs('./image', exist_ok=True)
    with open('./image/%s' % titles, 'wb') as f:
        f.write(image_content)


# # Save the blurb text (unused draft kept for reference)
# def save_intro(intro):
#     intro_content = intro
#     with open('./intros/', 'w') as f:
#         f.write(intro_content)
#

# Fetch one listing page of the Douban Top 250
def get_page(page):
    """Return the decoded HTML of Top-250 listing page *page* (0-based).

    Returns None on any non-200 response.
    """
    # BUG FIX: the site paginates 25 movies per page via the ``start``
    # offset (0, 25, 50, ..., 225).  The original used ``page + 24``,
    # which fetched offsets 24..33 — essentially the same page ten times.
    url = 'https://movie.douban.com/top250?start=%d' % (page * 25)
    # Browser-like User-Agent so the request is not blocked.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    respose = requests.get(url, headers=headers)
    if respose.status_code == 200:
        # .content is a byte string; decode it so the regex parser gets text.
        return respose.content.decode('utf-8')
    return None

# Parse one listing page


def parse_page(html):
    """Extract movie records from one Top-250 listing page.

    Returns a list of dicts shaped like
    ``[{'title': ..., 'intro': ..., 'covers': ...}, ...]``.
    """
    # Movie title: the alt attribute of the poster <img>.
    # (.*?) is non-greedy; re.S lets '.' also match newlines.
    title_pat = re.compile(r'<img.*?alt="(.*?)" src=.*? class.*?', re.S)
    titles = re.findall(title_pat, html)
    print(titles)
    # One-line blurb.  Not every movie has one, so this list can be
    # shorter than `titles`.
    intro_pat = re.compile(r' <span class="inq">(.*?)</span>', re.S)
    lines = re.findall(intro_pat, html)
    print(lines)
    # Poster image URL.
    cover_pat = re.compile(r'<img width=.*? alt=.*? src="(.*?)" class=.*?>', re.S)
    covers = re.findall(cover_pat, html)
    print(covers)

    # Combine into [{"title": ...}, ...] records.
    # BUG FIX: the original indexed lines[i]/covers[i] unguarded and raised
    # IndexError for any movie without a blurb.  Missing values become ''.
    # NOTE(review): if a movie in the MIDDLE of a page lacks a blurb, the
    # remaining blurbs still shift by one — fixing that properly would need
    # per-movie block parsing rather than three independent findall passes.
    result = []
    for i, title in enumerate(titles):
        result.append({
            'title': title,
            'intro': lines[i] if i < len(lines) else '',
            'covers': covers[i] if i < len(covers) else '',
        })
    pprint(result)

    return result

   #保存json文件
def save_json(result_list):
    json_str = json.dumps(result_list,ensure_ascii=False)
    with open('douban.json', 'w',encoding='utf-8') as f:
        f.write(json_str)

   #获取所有的页
def get_all_pages():
    result_list = []
    for page in range(10):
        print(page)
        html = get_page(page)
        # print(html)
        result = parse_page(html)
        result_list.extend(result)
    return result_list



def main():
    """Entry point: crawl every page, echo the records, persist them as JSON."""
    records = get_all_pages()
    pprint(records)
    save_json(records)


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()