import requests,json
from requests.exceptions import RequestException
from lxml import etree
from bs4 import BeautifulSoup
from pyquery import PyQuery

def getPage(url):
    """Fetch the given URL and return the response body as text.

    Returns None when the request raises a network error or the server
    responds with a non-200 status code, so callers must check for None.
    """
    # Browser-like headers: Douban rejects requests without a User-Agent.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1;WOW64) AppleWebKit/537.36 (KHTML,like GeCKO) Chrome/45.0.2454.85 Safari/537.36 115Broswer/6.0.3',
        'Referer': 'https://movie.douban.com/',
        'Connection': 'keep-alive'
    }
    try:
        # Keep the try body minimal; timeout prevents hanging forever
        # on a stalled connection.
        res = requests.get(url, headers=headers, timeout=10)
    except RequestException:
        return None
    if res.status_code == 200:
        return res.text
    return None

def parsePage(content):
    """Parse a Top250 HTML page with lxml XPath, yielding one dict per movie.

    'select' tags which parser produced the record; writeFile routes the
    record to its output file based on this value.
    """
    html = etree.HTML(content)

    for item in html.xpath("//div[@class='item']"):
        yield {
            'select': 1,
            'index': item.xpath(".//div/em[@class='']/text()")[0],
            'name': item.xpath(".//span[@class='title']/text()")[0],
            'picture': item.xpath(".//img[@width='100']/@src")[0],
            'actor': item.xpath(".//p[@class='']/text()")[0],
            'score': item.xpath(".//span[@class='rating_num']/text()")[0]
        }
    
def parsePage_1(content):
    """Parse a Top250 HTML page with BeautifulSoup, yielding one dict per movie.

    'select' is 2 so writeFile can route these records to their own file.
    """
    soup = BeautifulSoup(content, 'lxml')

    for movie in soup.find_all(name="div", attrs={"class": "item"}):
        title_tag = movie.find(name="span", attrs={"class": "title"})
        img_tag = movie.find(name="img", attrs={"width": "100"})
        yield {
            'select': 2,
            'index': movie.em.string,
            'name': title_tag.string,
            'picture': img_tag.attrs["src"],
            'actor': movie.select("div.bd p")[0].get_text(),
            'score': movie.select("div.star span.rating_num")[0].string
        }

def parsePage_2(content):
    """Parse a Top250 HTML page with PyQuery, yielding one dict per movie.

    'select' is 3 so writeFile can route these records to their own file.
    """
    doc = PyQuery(content)

    for entry in doc("div.item").items():
        yield {
            'select': 3,
            'index': entry.find("div.pic em").text(),
            'name': entry.find("div.hd span.title").text(),
            'picture': entry.find("div.pic img").attr("src"),
            'actor': entry.find("div.bd p:eq(0)").text(),
            'score': entry.find("div.star span.rating_num").text()
        }
    

# Maps a record's 'select' tag (which parser produced it) to its output file.
_OUTPUT_FILES = {1: './result.txt', 2: './result1.txt', 3: './result2.txt'}

def writeFile(content):
    """Append one movie record as a JSON line to the file for its parser.

    Records with an unknown 'select' value are ignored, matching the
    original if/elif behavior.
    """
    path = _OUTPUT_FILES.get(content.get("select"))
    if path is None:
        return
    with open(path, 'a', encoding="utf-8") as f:
        # ensure_ascii=False keeps the Chinese titles human-readable.
        f.write(json.dumps(content, ensure_ascii=False) + '\n')

def main(offset):
    """Scrape one Top250 page at the given offset and persist it via all three parsers."""
    url = "https://movie.douban.com/top250?start=" + str(offset)

    html = getPage(url)
    if html is None:
        # getPage returns None on network errors / non-200 responses;
        # the parsers would crash on None input, so bail out early.
        return

    # Run every parser over the same page; writeFile routes each record
    # to a per-parser output file based on its 'select' tag.
    for parser in (parsePage, parsePage_1, parsePage_2):
        for item in parser(html):
            writeFile(item)

if __name__ == '__main__':
    # Scrape only the first page by default.
    main(0)
    # To crawl all 10 pages (25 movies each), run instead:
    #     for i in range(10):
    #         main(i * 25)
