import sys
import json
from urllib import request
from bs4 import BeautifulSoup

# Fetch a page and parse it into a BeautifulSoup tree.
def huoquyemian(url):
    """Download *url* and return its HTML parsed as a BeautifulSoup tree.

    The HTTP response is closed via a context manager; the original
    leaked the connection by never closing it.
    """
    # urlopen's response object supports the context-manager protocol,
    # so the socket is released even if read/decode raises.
    with request.urlopen(url) as respon:
        page = respon.read().decode('UTF-8')
    return BeautifulSoup(page, 'html.parser')
# Scrape the needed data from one listing page and print it.
def zhuaqu(urlll, i):
    """Print index, title, director line and rating for each movie on a page.

    urlll: URL of the listing page to scrape.
    i: running count of movies printed before this page.
    Returns the updated count so pages can be chained.
    """
    items = huoquyemian(urlll).find('ul', id='asyncRatingRegion').contents
    for string in items:
        # .contents mixes Tag nodes with whitespace text nodes; the ' '
        # comparison skips the whitespace entries on this page.
        if string != ' ':
            i = i + 1
            print('序号：' + str(i))
            # The original wrapped values in eval(repr(...)) — a no-op
            # round-trip for strings and an eval anti-pattern; use the
            # extracted text directly (identical output).
            name = string.find('h2', class_='px14 pb6').string
            print('片名：' + name)
            daoyan = string.find('p').text
            print(daoyan)
            pingfen = string.find('b')
            # Some entries carry no rating element at all.
            if pingfen is not None:
                print('评分：' + pingfen.text)
    return i
# Collect every page URL from the paginator, then scrape each page.
def huoquurl(chushiurl):
    """Walk the pagination links starting at *chushiurl*, scraping each page.

    Returns the total number of movies printed (the original returned
    None; no caller used the return value, so this is backward-compatible).
    """
    items = huoquyemian(chushiurl).find('div', id='PageNavigator').contents
    x = 0
    for item in items:
        # .contents may contain text nodes, which have no get() and
        # would raise AttributeError in the original; skip them.
        if not hasattr(item, 'get'):
            continue
        # The current-page entry has no href — fall back to the start URL.
        if item.get('href') is None:
            url = chushiurl
        else:
            url = item.get('href')
        x = zhuaqu(url, x)
    return x

# Seed URL for the Mtime Top-100 movie chart.
geturl = "http://www.mtime.com/top/movie/top100/"

# Guard the crawl behind __main__ so importing this module
# does not trigger network access.
if __name__ == "__main__":
    huoquurl(geturl)