import requests
import requests.cookies
import re
import parsel
from fontTools.ttLib import TTFont
from PIL import Image,ImageDraw,ImageFont
import pytesseract
import csv
import time

# Request headers shared by every call in this module.
# The Cookie (captured from a real browser session) plus Referer/Origin are
# what gets us past Maoyan's anti-spider check; without them the board pages
# return a verification page instead of the movie list.
# NOTE(review): the Cookie is session-bound and will expire — refresh it from
# a live browser session when requests start failing.
p_headers = {
    'Content-Type': 'text/plain;charset=UTF-8',
    'Referer': 'https://maoyan.com/board/4',
    'Origin': 'https://maoyan.com',
    'User-Agent': 'Mozilla/5.0(Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36',
    'Cookie': '__mta=89003684.1635642896904.1635664438609.1635664670130.5; __mta=89003684.1635642896904.1635664670130.1635664690146.6; uuid_n_v=v1; uuid=F4660CC039E711EC8B5A61A0E822839E3AB02DC8FABB48B5A9FBD9E3A61CD33D; _csrf=a85d5b6a9deeac54748199748d05c2664e9f12c69f2d4f2c2784e405bb5f170c; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1635642897; _lxsdk_cuid=17cd3e9f1a1c8-053d05f9c75e58-57b193e-1fa400-17cd3e9f1a2c8; _lxsdk=F4660CC039E711EC8B5A61A0E822839E3AB02DC8FABB48B5A9FBD9E3A61CD33D; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1635664690; _lxsdk_s=17cd51bb51b-aa9-806-791||7'
}


# website url is https://maoyan.com/board/4
# first, we get the page
def get_page(web_url):
    """Fetch *web_url* and return the response body as text, or None on failure.

    Sends the module-level ``p_headers`` (Cookie included) so the request
    passes the site's anti-spider check.
    """
    # Timeout so a stalled connection cannot hang the whole scrape.
    # (The original also set response.encoding = response.apparent_encoding
    # here, which was immediately overwritten below — removed as redundant.)
    response = requests.get(web_url, headers=p_headers, timeout=15)
    if response.status_code == 200:
        # the board and detail pages are served as UTF-8
        response.encoding = "utf-8"
        return response.text
    # non-200 status: signal failure to the caller
    return None


# then,we parse the page and get what we want
# use regular expression
def parse_page(web_html):
    """Parse one board page, follow each movie's detail page, and append
    one CSV row per movie to result.csv.

    Row layout: title, director, actors, rating, income, duration, type.
    """
    # One <dd> block per movie. Groups: rank, detail href, title, actors,
    # release time, integer part of the score, fractional part of the score.
    pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?<a href="(.*?)".title.*?name.*?a.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>.*?score">.*?>(.*?)</i>.*?">(.*?)</i>',
        re.S)
    items = pattern.findall(web_html)
    # Append mode: the header row is written once by the __main__ block.
    # `with` guarantees the file is closed even if a detail-page scrape raises.
    with open("result.csv", 'a+', encoding='utf-8', newline="") as f:
        writer = csv.writer(f)
        for item in items:
            # relative href -> absolute detail-page url
            movie_url = "https://maoyan.com" + item[1]
            title = item[2].strip()
            # drop the leading 3-character actors label from the text
            name_of_actors = item[3].strip()[3:]
            # the score is split across two <i> tags (integer + fraction)
            rating = item[5].strip() + item[6].strip()
            # the detail page holds director, duration, type and income
            director, duration, type_i, income = parse_movie_page(movie_url)
            writer.writerow([title, director, name_of_actors, rating, income, duration, type_i])

# we scrap secondary page to get name_of_director,cumulative_income,duration and type
def parse_movie_page(movie_url):
    """Scrape a movie detail page for director, duration, type and
    cumulative box-office income.

    Returns a ``(director, duration, type_i, income)`` tuple; all four
    elements are empty strings when the page could not be fetched or the
    pattern did not match (the original raised UnboundLocalError here).
    """
    # Safe defaults so a failed fetch / non-matching page cannot crash.
    director = duration = type_i = income = ""
    html = get_page(movie_url)
    if html is None:
        return director, duration, type_i, income
    # Groups: type, "type / duration" line, income span, director name.
    pattern = re.compile(
        '<div class="wrapper clearfix">.*?<li class="ellipsis">.*?blank">(.*?)</a>.*?ellipsis">(.*?)</li>.*?累计票房.*?box">.*?">(.*?)</span>.*?导演.*?name">(.*?)</a>',
        re.S)
    items = pattern.findall(html)
    for item in items:
        director = item[3].strip()
        income = item[2].strip()
        # "暂无" means no box-office figure yet; only decode real numbers,
        # which are rendered with an obfuscated web font
        if income != "暂无":
            income = decode_income(income, html)
        # second <li> reads "type / duration": keep the part after "/"
        duration = item[1].strip().split("/")[1]
        type_i = item[0]
    return director, duration, type_i, income

def decode_income(income, html):
    """Decode the web-font-obfuscated box-office number embedded in *html*.

    Maoyan renders box-office digits with a per-page randomly generated
    .woff font. Strategy: download the font, draw all of its glyphs into
    an image, OCR the image with tesseract, build a glyph-code -> digit
    mapping, substitute it back into the page html, then read the income
    text with a CSS selector.
    """
    # Locate the page's .woff font file. Raw string with escaped dots so
    # the pattern matches only the real meituan.net font URL (the original
    # non-raw string triggered invalid-escape SyntaxWarnings on Python 3.12+).
    pattern = re.compile(r"url\('(//vfile\.meituan\.net/colorstone/.*?\.woff)'\) format\('woff'\);")
    font_url = 'http:' + pattern.findall(html)[0]
    file_name = font_url.split('/')[-1]
    # download the font and save it locally for TTFont / ImageFont
    font_response = requests.get(font_url, headers=p_headers, timeout=15)
    with open(file_name, mode='wb') as f:
        f.write(font_response.content)
    font = TTFont(file_name)
    # XML dump is for debugging only; the first two glyphs are placeholders
    font.saveXML('font.xml')
    code_list = font.getGlyphOrder()[2:]
    # render every glyph onto one white canvas so tesseract can read them
    im = Image.new("RGB", (1800, 1800), (255, 255, 255))
    image_draw = ImageDraw.Draw(im)
    # separate name: the original shadowed the TTFont object with this
    draw_font = ImageFont.truetype(file_name, 40)
    # "uniXXXX" glyph names -> "\uXXXX" escapes -> actual characters
    new_list = [code.replace('uni', '\\u') for code in code_list]
    text = ''.join(new_list)
    text = text.encode('utf-8').decode('unicode_escape')
    image_draw.text((0, 100), text, font=draw_font, fill="#000000")
    im.save("sss.jpg")
    im = Image.open("sss.jpg")
    # OCR the rendered glyphs
    res = pytesseract.image_to_string(im)
    # pytesseract output includes newlines and a trailing form feed; drop
    # all whitespace so zip() below stays aligned with code_list (the
    # original kept them, which misaligned the glyph->digit mapping).
    res_str = [c for c in res if not c.isspace()]
    # "uniXXXX" -> "&#xxxxx;" html entity form used in the page source
    html_code_list = [i.lower().replace("uni", "&#x") + ";" for i in code_list]
    result = dict(zip(html_code_list, res_str))
    # substitute every entity with its recognized digit
    for k, v in result.items():
        html = html.replace(k, v)
    selector = parsel.Selector(html)
    str_income = selector.css(".movie-index-content .stonefont::text").getall()
    # the third .stonefont text node is the cumulative income figure
    # NOTE(review): index 2 assumes the page layout — confirm against a live page
    return str_income[2]


if __name__ == '__main__':
    # The Top-100 board is paginated 10 movies per page -> 10 pages total.
    # Write the CSV header once; parse_page() appends rows below it.
    with open("result.csv", 'w', encoding='utf-8', newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["Title", "Name_of_director", "Name_of_actors", "Rating", "Cumulative income", "Duration", "Type"])
    for page in range(10):
        # offset query parameter takes 0, 10, 20, ..., 90
        url = 'https://maoyan.com/board/4?offset=' + str(page * 10)
        html = get_page(url)
        # skip a page whose fetch failed instead of passing None to re.findall
        if html is not None:
            parse_page(html)
        # be polite and avoid triggering rate limiting
        time.sleep(10)