import requests
from bs4 import BeautifulSoup
from excel import data_write


# Material - scraping movie titles
# https://blog.csdn.net/datacastle/article/details/78812471  usable as material for making the PPT
# url = 'https://movie.douban.com/subject/1292052/'
# data = requests.get(url).text
# s=etree.HTML(data)
#
# film=s.xpath('//*[@id="content"]/h1/span[1]/text()')
# print(film)

# https://www.cnblogs.com/haichong/p/8067874.html

class GetMovieDatas():
    """Scrape the Douban Top-250 movie listing and collect one row per movie.

    Each row is [rank, combined titles, director/cast line, short quote,
    rating, detail-page URL]; rows accumulate in ``self.datas``.
    """

    def __init__(self):
        # Instance-level result list. The original code used a class-level
        # ``datas = []`` attribute, which is shared across ALL instances, so
        # two scrapers would silently mix their results.
        self.datas = []

    # Fetch the listing pages
    def get_content(self, page):
        """Download *page* result pages (25 movies each) and parse them.

        :param page: number of pages to crawl, starting from page 1.
        """
        for i in range(page):
            print('--------正在爬取第%i页的数据--------' % (i + 1))
            url = 'https://movie.douban.com/top250?start=%s&filter=' % (str(i * 25))
            # timeout so a stalled connection cannot hang the crawl forever
            html = requests.get(url, timeout=10).text
            self.datas.extend(self.get_data(html))

    def get_data(self, html):
        """Parse one listing page's HTML and return its rows as a list."""
        soup = BeautifulSoup(html, 'lxml')
        data = soup.find('ol').find_all('li')
        datas = []
        for info in data:
            #   rank
            num = info.find('em').get_text()
            #   movie titles (Chinese title / original title / aliases)
            names = info.find_all('span')  # find_all returns a list
            name1 = names[0].get_text()
            name2 = names[1].get_text().replace('\xa0', ' ')
            name3 = names[2].get_text().replace('\xa0', ' ')
            all_name = name1 + ' ' + name2 + ' ' + name3
            #   director / cast description line
            director = info.find('p').get_text().replace('  ', '').replace('\n', '').replace('\xa0', '')
            #   short quote — not every Top-250 entry has an "inq" span,
            #   so fall back to '' instead of crashing with IndexError
            comments = info.find_all('span', class_='inq')
            comment = comments[0].get_text() if comments else ''
            #   rating
            star = info.find('span', class_='rating_num').get_text()
            #   detail-page URL
            detail_url = info.find('a').get('href')
            datas.append([num, all_name, director, comment, star, detail_url])
        return datas


if __name__ == '__main__':
    # Crawl the first four listing pages (100 movies) and write the
    # collected rows out to the spreadsheet.
    scraper = GetMovieDatas()
    scraper.get_content(4)
    data_write(scraper.datas)

'''
1. 把数据写入表格 -done
2. 把数据写入数据库
3. 把线性脚本进行封装并优化-done
4. 反爬取策略
5. 制作PPT(做之前问一下峰哥做没做)

PPT思路:
1. 爬虫的原理
2. 实现
    1. 包的下载(requests, lxml)
    2. 框架的选取
    3. BeautifulSoup的基本用法
3. 反爬取策略

'''
