# 猫眼电影top100爬虫，使用lxml提取信息，保存文件maoyan.txt
import requests,time
from lxml import etree

def get_page(url):
    '''Fetch one Maoyan board page and return its HTML text.

    Args:
        url: Full URL of a board page, e.g. https://maoyan.com/board/4?offset=0

    Returns:
        The UTF-8 decoded page body on HTTP 200, otherwise None
        (non-200 status, network error, or timeout).
    '''
    hs={
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding':'gzip, deflate, br',
        'Accept-Language':'zh-CN,zh;q=0.9',
        'Host':'maoyan.com',
        'Referer':'https://maoyan.com/board/4',
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4039.400',
        }
    try:
        # Browser-like headers are required, otherwise the site's
        # anti-scraping rejects the request; a timeout prevents the
        # crawler from hanging forever on a stalled connection.
        resp=requests.get(url,headers=hs,timeout=10)
        if resp.status_code==200:
            return resp.content.decode('utf8')
        return None  # non-200: explicit, rather than falling off the end
    except requests.RequestException as e:  # narrow: only network/HTTP errors
        print(e)
        return None

def get_info(html_str):
    '''Parse one board page and append each movie's fields to maoyan.txt.

    Args:
        html_str: UTF-8 decoded HTML of a Maoyan board page.

    Side effect:
        Appends one '#'-separated line per movie to maoyan.txt, in the form
        rank#detail_url#poster_img#name#stars#release_time#score
    '''
    html=etree.HTML(html_str)
    d_li=html.xpath('//dl[@class="board-wrapper"]//dd')
    # Open the output file once per page instead of once per movie;
    # append mode so successive pages accumulate in the same file.
    with open('maoyan.txt','a',encoding='utf8') as f:
        for d in d_li:
            index=d.xpath('string(./i)')
            n_url=d.xpath('string(./a/@href)')
            # The rendered DOM shows src, but the raw HTML carries the
            # real image URL in data-src (lazy loading).
            img=d.xpath('string(.//img[@class="board-img"]/@data-src)')
            name=d.xpath('string(.//p[@class="name"]//a)')
            # Strip the "主演：" (starring) label plus whitespace/newlines.
            star=d.xpath('string(.//p[@class="star"])').split('主演：')[-1].replace('\n','').replace(' ','')
            # Strip the "上映时间：" (release date) label.
            rtime=d.xpath('string(.//p[@class="releasetime"])').replace('上映时间：','')
            score=d.xpath('string(.//p[@class="score"])')
            f.write(f'{index}#{n_url}#{img}#{name}#{star}#{rtime}#{score}\n')


def main():
    '''Crawl all 10 pages of the Maoyan top-100 board and save the results.'''
    for page in range(10):
        # Each page holds 10 movies; the board paginates via ?offset=0,10,...,90.
        page_url=f'https://maoyan.com/board/4?offset={page*10}'
        page_html=get_page(page_url)
        if page_html:
            get_info(page_html)
        # Throttle between requests to stay under the site's rate limits.
        time.sleep(5)

# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
