'''
demo3介绍：
目标：爬取某瓣Top250影片
以下库，由于编写程序的过程中，出现了各种报错，从网页上搜索了不同的办法，导致添加了较多但未使用到的第三方库
'''
from urllib.request import urlopen
from bs4 import BeautifulSoup
import requests
import json
import csv
import re
import pandas as pd
import os
from openpyxl import Workbook

# Browser headers: identify as a desktop Chrome/Edge so the site serves the
# normal page instead of rejecting the default python-requests user agent.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36 Edg/124.0.0.0'
}

# Default crawl target: Douban Top 250 movies.
url_target = 'https://movie.douban.com/top250'

# Links collected by the crawler (legacy module-level accumulator).
links_get = []


def link_parser(url_target):
    """Download *url_target* and return it parsed as a BeautifulSoup tree.

    Uses the module-level ``headers`` so the request looks like a normal
    browser; parses with the ``lxml`` backend.
    """
    response = requests.get(url=url_target, headers=headers)
    return BeautifulSoup(response.content, "lxml")

# Extract the links under the /subject/ directory (individual movie pages).
def link_search(bsObj):
    """Return all hrefs containing ``/subject/`` found in a parsed page.

    Parameters:
        bsObj: a BeautifulSoup tree (as returned by ``link_parser``).

    Returns:
        list[str]: the hrefs found in *this* document, in document order.

    Note: the original implementation appended into the module-level
    ``links_get`` and returned that global, so calling it on a second page
    duplicated the first page's links in the result.  A fresh local list is
    returned instead; the single-call usage in ``__main__`` is unaffected.
    """
    subject_pattern = re.compile("/subject/")  # one occurrence is enough; the old "(...)+" quantifier was redundant
    found = []
    # find_all is the modern bs4 name for the deprecated findAll alias.
    for link in bsObj.find_all("a", href=subject_pattern):
        if "href" in link.attrs:
            found.append(link.attrs["href"])
    return found

# Persist the collected links to an Excel workbook.
def file_save(links_get, path="./豆瓣.xlsx"):
    """Write the collected links to an .xlsx file, one link per row.

    Parameters:
        links_get: iterable of link strings to store.
        path: output file path; defaults to the original hard-coded
            "./豆瓣.xlsx" so existing callers are unaffected.
    """
    df = pd.DataFrame(links_get, columns=['已获取的链接'])
    # index=False: the row numbers carry no information here.
    df.to_excel(path, index=False)


def main():
    """Announce that the crawl has finished."""
    banner = "爬虫结束！"
    print(banner)


if __name__ == "__main__":
    # Menu of crawlable targets; the user types the key, not the URL.
    lib1 = {'1': 'https://movie.douban.com/top250'}
    print(lib1)
    i = input("输入你想要解析的网页地址：")
    try:
        url_target = lib1[i]
    except KeyError:
        # The original code let the raw KeyError traceback escape on any
        # input other than '1'; exit with a readable message instead.
        raise SystemExit(f"无效的选项：{i!r}")
    bsObj = link_parser(url_target)
    links_get = link_search(bsObj)
    file_save(links_get)
    main()
