# -*- coding: utf-8 -*-

import json
import os 
import warnings
import hashlib 
import requests
from lxml import etree
from requests.exceptions import RequestException


# URL template for the Maoyan "Top 100" board; ``offset`` selects the page
# (0, 10, 20, ... — each page lists 10 films).
base_url = 'https://www.maoyan.com/board/4?offset={0}'

# Browser-like User-Agent so the site does not reject the scripted requests.
headers = {'User-Agent':"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
def get_one_page(offset):
    """Fetch one page of the Maoyan Top-100 board.

    Args:
        offset: pagination offset substituted into ``base_url``.

    Returns:
        The page HTML as text, or ``None`` when the request fails or the
        server answers with a non-200 status.
    """
    url = base_url.format(offset)
    try:
        # timeout keeps a stalled connection from hanging the whole scrape
        resp = requests.get(url, headers=headers, timeout=10)
        if resp.status_code == requests.codes.ok:
            return resp.text
        # warnings.warn() requires a str or Warning; passing the raw int
        # status code (as the original did) raises TypeError.
        warnings.warn('unexpected status code: {0}'.format(resp.status_code))
        return None
    except RequestException as e:
        # Python 3 exceptions have no ``.message`` attribute; str(e) is the
        # portable way to get the description.
        warnings.warn(str(e))
        return None


def parse_page(html):
    """Parse one board page and yield a dict of fields per film.

    As a side effect, each film's cover image is downloaded and saved to
    disk via ``download_cover_image`` / ``save_cover_image``.

    Args:
        html: page source as returned by ``get_one_page`` (may be None).

    Yields:
        dicts with keys film_index, film_name, film_star, film_releasetime,
        film_image_link, film_score; missing fields are None.
    """
    if not html:
        # A bare return simply ends the generator; the original's
        # ``return False`` value was invisible to iterating callers anyway.
        return

    tree = etree.HTML(html, etree.HTMLParser())

    def _first(node, query):
        # First stripped text match for the XPath query, or None.
        found = node.xpath(query)
        return found[0].strip() if found else None

    for dd in tree.xpath('//dl[@class="board-wrapper"]/dd'):
        film_index = _first(dd, './/i[contains(@class,"board-index")]/text()')
        film_name = _first(dd, './/p[@class="name"]/a/text()')
        film_star = _first(dd, './/p[@class="star"]/text()')
        film_releasetime = _first(dd, './/p[@class="releasetime"]/text()')
        film_image_link = _first(dd, './/a[@class="image-link"]/img[2]/@data-src')

        # The score is split across two <i> nodes (integer and fraction).
        # The original indexed score[1] unconditionally and crashed with
        # IndexError whenever only one node matched; joining whatever parts
        # exist handles 0, 1 or 2 nodes.
        score = dd.xpath('.//p[@class="score"]/i/text()')
        film_score = ''.join(part.strip() for part in score) if score else None

        save_cover_image(download_cover_image(film_image_link))

        yield {
                'film_index': film_index,
                'film_name': film_name,
                'film_star': film_star,
                'film_releasetime': film_releasetime,
                'film_image_link': film_image_link,
                'film_score': film_score
        }


def save_to_json(item):
    """Append each record as pretty-printed JSON to ``top100.json``.

    Args:
        item: iterable of JSON-serializable dicts (e.g. the generator
            returned by ``parse_page``).
    """
    # Open the file once instead of re-opening it for every record.
    with open('top100.json', 'a', encoding='utf8') as wf:
        # The original reused the name ``item`` for the loop variable,
        # shadowing the iterable parameter.
        for record in item:
            wf.write(json.dumps(record, ensure_ascii=False, indent=4))
            # Separate records; the original concatenated them back-to-back,
            # producing a file no JSON parser could read.
            wf.write('\n')

    print('DONE!')



def download_cover_image(url_link):
    """Download a film's cover image.

    Args:
        url_link: absolute URL of the image.

    Returns:
        The raw image bytes, or ``None`` when the request fails or the
        server answers with a non-200 status.
    """
    try:
        # timeout keeps a stalled download from hanging the whole scrape
        resp = requests.get(url_link, headers=headers, timeout=10)
        if resp.status_code == requests.codes.ok:
            return resp.content
        # warnings.warn() requires a str or Warning; passing the raw int
        # status code (as the original did) raises TypeError.
        warnings.warn('unexpected status code: {0}'.format(resp.status_code))
        return None
    except RequestException as e:
        # Python 3 exceptions have no ``.message`` attribute.
        warnings.warn(str(e))
        return None


def save_cover_image(content):
    """Write image bytes to ``covers/<md5>.jpeg`` under the working directory.

    Args:
        content: raw image bytes, or a falsy value to skip saving.

    Returns:
        ``False`` when content is falsy; otherwise ``None``.
    """
    if not content:
        return False

    # Name the file after the content hash so identical images deduplicate.
    file_name = hashlib.md5(content).hexdigest() + '.jpeg'
    dir_path = os.path.join(os.getcwd(), 'covers')
    # The original assumed the directory already existed and joined the
    # path with a hard-coded Windows backslash; both break the first run
    # and any non-Windows platform.
    os.makedirs(dir_path, exist_ok=True)
    full = os.path.join(dir_path, file_name)
    if not os.path.exists(full):
        with open(full, 'wb') as wf:
            wf.write(content)
            print("SAVE IMAGE DONE!")


def main(offset):
    """Scrape one board page and persist its films.

    Args:
        offset: pagination offset (0, 10, 20, ...).
    """
    html = get_one_page(offset)
    items = parse_page(html)
    # parse_page is a generator: it must be consumed or nothing happens
    # (no parsing, no cover downloads). The original left this call
    # commented out, so the script did no work at all.
    save_to_json(items)

if __name__ == '__main__':
    # Each board page lists 10 films, so offsets advance in steps of 10.
    for page in range(10):
        print("正在访问第【{0}】页".format(page + 1))
        main(page * 10)



            