from bs4 import BeautifulSoup  #网页解析，数据获取
import re   #正则表达式，文字匹配
import urllib   #指定url,获取网页数据
import xlwt     #解析excel表格
import sqlite3  #数据库操作
import urllib.request as req,urllib.parse as parse


def main():
    """Scrape the Douban Top 250 movie list and save it to an Excel file."""
    baseurl = "https://movie.douban.com/top250?start="
    # Crawl all pages and collect (link, image URL, title) tuples.
    data = getData(baseurl)
    # BUGFIX: xlwt can only write the legacy binary .xls format; saving with
    # an .xlsx extension produced a file Excel flags as corrupt/mismatched.
    svaeDate(savepath=".\\豆瓣电影.xls", data=data)

def getData(baseurl):
    """Crawl all 10 pages of the Douban Top 250 and extract movie info.

    Args:
        baseurl: URL prefix; the page offset (0, 25, ..., 225) is appended.

    Returns:
        A list of (link, img_url, title) tuples, one per movie.
    """
    # Compile the patterns once; the originals were recompiled for every
    # movie item inside the inner loop (loop-invariant work).
    find_link = re.compile(r'<a href="(.*?)">')
    find_img = re.compile(r'<img .* src="(.*?)".*>', re.S)
    find_title = re.compile(r'<span class="title">(.*)</span>')

    data = []
    for page in range(10):  # 10 pages x 25 movies each
        html = askUrl(baseurl + str(page * 25))
        soup = BeautifulSoup(html, "html.parser")

        # Each movie is rendered inside a <div class="item"> element.
        for item in soup.find_all('div', class_="item"):
            item = str(item)  # regex search works on the raw HTML string
            link = find_link.findall(item)[0]
            img = find_img.findall(item)[0]
            # findall returns every <span class="title"> (Chinese title plus
            # the original title); keep the first, matching prior behavior.
            title = find_title.findall(item)[0]
            data.append((link, img, title))
    return data
def svaeDate(savepath, data):
    """Write scraped movie rows to an Excel workbook at *savepath*.

    Args:
        savepath: Output file path (xlwt emits the legacy binary .xls format).
        data: Sequence of row tuples, e.g. (link, img_url, title).
    """
    workbook = xlwt.Workbook(encoding="utf-8")
    worksheet = workbook.add_sheet("sheet1")
    # enumerate avoids manual index arithmetic and, unlike the original
    # len(data[0]), does not raise IndexError when data is empty.
    for row_idx, row in enumerate(data):
        for col_idx, value in enumerate(row):
            worksheet.write(row_idx, col_idx, value)  # (row, col, value)
    workbook.save(savepath)
    return

def askUrl(url):
    """Fetch *url* via HTTP GET and return the body decoded as UTF-8.

    Sends a browser User-Agent header — presumably douban.com blocks
    urllib's default agent (TODO confirm), hence the spoofed value.

    Raises:
        urllib.error.URLError: on network failure or HTTP error status.
    """
    obj = req.Request(url=url, method="GET", headers={
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"})
    # BUGFIX: the original never closed the response; the context manager
    # guarantees the underlying socket is released even if read() raises.
    with req.urlopen(obj) as resp:
        return resp.read().decode("utf-8")
# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()