import requests
from bs4 import BeautifulSoup
import bs4
import time
from requests.exceptions import RequestException
from lxml import etree
import os
import json
from pyquery import PyQuery

def getPage(url):
    """Fetch the page at *url* and return its HTML text.

    Returns the literal string 'error' on any request failure; this
    sentinel is kept for backward compatibility with existing callers.
    """
    try:
        headers = {'user-agent': 'Mozilla/5.0'}
        # timeout keeps a dead connection from hanging the crawl forever
        r = requests.get(url, headers=headers, timeout=10)
        # raise on non-2xx status instead of the original no-op
        # `r.status_code`, so an HTTP error page is never returned as text
        r.raise_for_status()
        return r.text
    except RequestException:
        # narrow catch (imported at top of file) instead of a bare except
        return 'error'
def parsePage(content):
    """Parse one Douban Top-250 HTML page and yield a dict per movie.

    Each yielded dict contains: 'index' (rank), 'title', 'image'
    (poster URL), 'actor' (director/cast line) and 'score'.

    Note: earlier lxml-XPath and BeautifulSoup variants that lived here
    as dead string-literal blocks were removed; this PyQuery version is
    the one that was active.
    """
    doc = PyQuery(content)
    items = doc('div.item')
    for item in items.items():
        yield {
            'index': item.find('div.pic em').text(),
            'title': item.find("div.hd span.title").text(),
            'image': item.find("div.pic img").attr('src'),
            # :eq(0) picks the first <p> (director/cast), not the quote line
            'actor': item.find("div.bd p:eq(0)").text(),
            'score': item.find("div.star span.rating_num").text(),
        }
        
def writeFile(content, path='./result.txt'):
    """Append *content* as one JSON line to *path*.

    Parameters
    ----------
    content : object
        Any JSON-serializable value (here, one movie dict per call).
    path : str, optional
        Output file; defaults to './result.txt' so existing callers
        are unaffected.
    """
    with open(path, 'a', encoding='utf-8') as f:
        # ensure_ascii=False keeps Chinese titles human-readable on disk
        f.write(json.dumps(content, ensure_ascii=False) + "\n")
        


def main(offset):
    """Fetch, parse and print one page of the Douban Top 250.

    Parameters
    ----------
    offset : int
        Start index for the site's ?start= query parameter
        (each page holds 25 movies).
    """
    url = 'https://movie.douban.com/top250?start=' + str(offset)
    html = getPage(url)
    if html == 'error':
        # getPage signals failure with this sentinel string; don't
        # hand it to the parser as if it were HTML
        print('failed to fetch', url)
        return
    for item in parsePage(html):
        print(item)
        #writeFile(item)

if __name__ == '__main__':
    
    # Demo run: scrape the second page only (movies ranked 26-50).
    main(25)
    # NOTE(review): the string literal below is dead code kept as an
    # alternative full crawl — all 10 pages with a 1 s pause between
    # requests (uses the `time` import at the top of the file).
    '''
    for i in range(10):
        main(offset=i*25)
        time.sleep(1)
    '''
