import json
from pprint import pprint

import requests
import re
import mysql_helper

# Shared database handles created at import time: one MySQL connection and
# cursor, reused by parse_page() for every saved row.
# NOTE(review): mysql_helper is a project-local module; its connection config
# and the schema expected by save_db2 are not visible from this file.
con = mysql_helper.get_connection()
cursor = mysql_helper.get_cursor(con)


# Fetch a binary resource (image, MP3, video) and return its raw bytes.
def get_resouce(url, timeout=10):
    """Download *url* and return the response body as raw bytes.

    The misspelled name ("resouce") is kept for compatibility with existing
    callers such as save_image().

    Args:
        url: URL of the binary resource.
        timeout: request timeout in seconds (new parameter, default 10).

    Returns:
        The raw response bytes on HTTP 200, otherwise None.
    """
    # Spoof a desktop-browser User-Agent so the server serves the resource.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }

    # Without a timeout a stalled connection would hang the scraper forever.
    response = requests.get(url, headers=headers, timeout=timeout)
    if response.status_code == 200:
        # .content is the raw byte body — no text decoding for binary data.
        return response.content
    return None


# Fetch one page of the Maoyan Top-100 board and return its HTML text.
def get_page(page, timeout=10):
    """Fetch the board page for the given 0-based page index.

    Args:
        page: 0-based page index; each page lists 10 movies, so the site's
              offset parameter is page * 10.
        timeout: request timeout in seconds (new parameter, default 10).

    Returns:
        The UTF-8-decoded HTML text on HTTP 200, otherwise None.
    """
    url = 'https://maoyan.com/board/4?offset=%s' % str(page * 10)
    # Spoof a desktop-browser User-Agent so the site serves the normal page.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }

    # Without a timeout a stalled connection would hang the scraper forever.
    response = requests.get(url, headers=headers, timeout=timeout)
    if response.status_code == 200:
        # .content is a byte stream; decode to str for the regex parser.
        return response.content.decode('utf-8')
    return None

# Save a movie's cover image under ./images/<title>.jpg.
def save_image(url, title):
    """Download the image at *url* and write it to ./images/<title>.jpg.

    Args:
        url: direct URL of the image resource.
        title: movie title used as the file name (assumed filesystem-safe —
               TODO confirm titles never contain '/' or other bad characters;
               also assumes the ./images directory already exists).
    """
    image_content = get_resouce(url)
    # get_resouce returns None on any non-200 response; skip instead of
    # crashing with a TypeError when f.write(None) is attempted.
    if image_content is None:
        return
    # (The original also derived a file name from the URL but never used it;
    # that dead local has been removed.)
    with open('./images/%s.jpg' % title, 'wb') as f:
        f.write(image_content)



# Parse one board page's HTML with regexes and persist each movie row.
def parse_page(html):
    """Extract movie fields from a board page and save each row to MySQL.

    Args:
        html: UTF-8-decoded HTML text of one board page.

    Returns:
        A list of dicts with keys title/actor/releasetime/score/rank/cover,
        one per movie matched on the page.
    """
    # Movie titles. .*? is non-greedy; re.S makes '.' also match newlines so
    # the patterns can span multiple lines of markup.
    pattern = re.compile('movieId:.*?>.*?<img.*?<img.*?alt="(.*?)" class.*?', re.S)
    titles = re.findall(pattern, html)
    print(titles)

    # Starring actors, whitespace-trimmed.
    pattern = re.compile('<p class="star">(.*?)</p>', re.S)
    actors = [actor.strip() for actor in re.findall(pattern, html)]
    print(actors)

    # Release dates.
    pattern = re.compile('<p class="releasetime">上映时间：(.*?)</p>', re.S)
    releasetimes = re.findall(pattern, html)
    print(releasetimes)

    # Scores: the integer and fraction parts are separate <i> tags, so each
    # match is a 2-tuple that gets joined into one string like "9.5".
    pattern = re.compile('<p class="score"><i class="integer">(.*?)</i><i class="fraction">(.*?)</i></p>', re.S)
    scores = [''.join(item) for item in re.findall(pattern, html)]
    print(scores)

    # Board ranks.
    pattern = re.compile('<i class="board-index.*?">(.*?)</i>', re.S)
    ranks = re.findall(pattern, html)
    print(ranks)

    # Cover image URLs.
    pattern = re.compile('movieId:.*?>.*?<img.*?<img.*?src="(.*?)" alt=.*? class.*?', re.S)
    covers = re.findall(pattern, html)
    print(covers)

    # Zip the parallel lists into one dict per movie. zip stops at the
    # shortest list, so a partially-matched page no longer raises IndexError
    # (the original indexed every list by len(titles)).
    result = []
    for title, actor, releasetime, score, rank, cover in zip(
            titles, actors, releasetimes, scores, ranks, covers):
        data_dict = {
            'title': title,
            'actor': actor,
            'releasetime': releasetime,
            'score': score,
            'rank': rank,
            # Image URL only; downloading via save_image is disabled.
            'cover': cover,
        }
        print(title)
        mysql_helper.save_db2(con, cursor, data_dict)
        result.append(data_dict)
    return result


# Save the scraped results to a JSON file.
def save_json(result_list):
    """Write *result_list* to maoyan.json as UTF-8 JSON.

    ensure_ascii=False keeps Chinese titles human-readable instead of
    \\uXXXX-escaping them.
    """
    with open('maoyan.json', 'w', encoding='utf-8') as f:
        json.dump(result_list, f, ensure_ascii=False)

# Scrape every page of the board.
def get_all_pages():
    """Fetch and parse all 10 board pages, returning one combined list."""
    all_movies = []
    for page_no in range(10):
        print(page_no)
        page_html = get_page(page_no)
        all_movies.extend(parse_page(page_html))
    return all_movies



def main():
    """Entry point: scrape the full board, report, and persist to JSON."""
    movies = get_all_pages()
    print(len(movies))
    pprint(movies)
    save_json(movies)

# Run the full scrape only when executed as a script, not on import.
if __name__ == '__main__':
    main()
