from selenium import webdriver
from lxml import etree
import csv
import time
import math
import requests
from bs4 import BeautifulSoup


''' 
  爬虫demo： 爬取豆瓣电影TOP250
'''


""" 
  获取所有页面地址
"""
def get_url_list():
  """Return the list of paginated doulist URLs (start=0,25,...,225 — 10 pages)."""
  return [
    'https://www.douban.com/doulist/2772079/?start=' + str(start) + '&sort=seq&playable=0&sub_type='
    for start in range(0, 250, 25)
  ]

""" 
  获取网页源码
"""
def get_html_text(url):
  """Fetch *url* and return its HTML text.

  Returns the sentinel string 'error' on any request failure (callers
  compare against it), including non-2xx HTTP status codes — the original
  bare `except:` hid those and also swallowed KeyboardInterrupt/SystemExit.
  """
  header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'
  }
  try:
    r = requests.get(url, headers=header, timeout=5)
    # Treat 4xx/5xx responses as failures instead of returning an error page.
    r.raise_for_status()
    return r.text
  except requests.RequestException:
    # Network errors, timeouts and bad statuses all map to the sentinel.
    return 'error'

def main():
  """Scrape every page of the doulist, collect movie dicts, save one CSV.

  Each movie dict holds 'rank', 'name' and zero or more 'msgN' detail
  fields taken from the abstract block.
  """
  url_list = get_url_list()
  movie_data_list = []
  for url in url_list:
    source_page = get_html_text(url)
    if source_page == 'error':
      # Skip pages that failed to download; keep scraping the rest.
      continue
    html_page = etree.HTML(source_page)
    movie_list = html_page.xpath('//*[@class="doulist-item"]')

    for movie in movie_list:
      movie_dic = {}
      # Guard the [0] index: a missing rank node used to raise IndexError.
      rank_nodes = movie.xpath('.//div/div[1]/span/text()')
      movie_dic['rank'] = rank_nodes[0] if rank_nodes else ''

      nameL = movie.xpath('.//div/div[contains(@class, "doulist-subject")]/div[contains(@class, "title")]/a/text()')
      # The title list may start with whitespace-only text nodes; take the
      # first non-empty one. The old nameL[1] access could raise IndexError
      # when only a single blank entry was present.
      names = [n.strip() for n in nameL if n.strip()]
      if names:
        movie_dic['name'] = names[0]
        msgList = movie.xpath('.//div/div[contains(@class, "doulist-subject")]/div[contains(@class, "abstract")]/text()')
        for i, msg in enumerate(msgList):
          movie_dic['msg' + str(i)] = msg.strip()
      else:
        movie_dic['name'] = '没有信息'
      movie_data_list.append(movie_dic)

  print(movie_data_list)
  # Write once after all pages are scraped; the old code rewrote the whole
  # file after every page (10 redundant writes of cumulative data).
  save_csv(movie_data_list, fileName = '豆瓣TOP250')


''' 
  保存dict数据至csv文件
'''
def save_csv(dict, **other):
  """Write a list of row dicts to a CSV file.

  Keyword arg 'fileName' names the file (extension added); otherwise the
  current Unix timestamp is used. Rows may have differing key sets: the
  header is the union of all keys in first-seen order and missing cells
  are written empty — the old first-row-only header made DictWriter raise
  ValueError as soon as a later row carried an extra key (which happens
  here, since the number of 'msgN' fields varies per movie).
  """
  rows = dict  # local alias: the parameter name shadows the builtin
  if 'fileName' in other:
    file_name = other['fileName'] + '.csv'
  else:
    file_name = str(int(time.time())) + '.csv'
    print('fileName:', file_name)
  if not rows:
    # Nothing to write; the old path crashed with IndexError on rows[0].
    return
  # Union of all keys, preserving first-seen order.
  header = []
  for row in rows:
    for key in row:
      if key not in header:
        header.append(key)
  with open(file_name, 'w', newline='', encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=header, restval='')
    writer.writeheader()  # write the column names
    writer.writerows(rows)  # write the data rows
    print("数据写入完成")

''' 
  @input dictionary # 要保存的字典数据
  @return list      # 输出字典数据的key列表，作为csv文件的表头
'''
def get_header(dict):
  """Return the first row's keys, in order, for use as the CSV header.

  @input dict  list of row dictionaries (must be non-empty)
  @return      list of the first row's keys
  """
  return [column_name for column_name in dict[0]]

# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
  main()