from bs4 import BeautifulSoup
import requests
import random

# Chapter-index page of the target novel.
url = "http://www.biqudu.tv/0_698/"


# Browser-like request headers sent with every fetch.
# NOTE: requests treats header names case-insensitively, so the original
# typo'd 'If-None-Natch' and case-duplicate 'If-Modified-Since' entries
# were redundant empty values and have been dropped.
header = {
    'accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
    'accept-encoding': 'gzip, deflate,br',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'if-modified-since': '',
    'if-none-match': '',
    'referer': 'https://www.b96c9843c7f35ade.com/',
    'sec-fetch-dest': 'image',
    'sec-fetch-mode': 'no-cors',
    'sec-fetch-site': 'cross-site',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66',
}

# Pool of free HTTP proxies; one is picked at random for this run.
# BUG FIX: the proxy-mapping key must be the lowercase URL scheme
# ('http') — requests looks proxies up by the request URL's scheme, and
# an uppercase 'HTTP' key never matches, so the proxy was silently
# bypassed and every request went out directly.
proxies = ['http://110.243.30.23:9999', 'http://222.189.191.206:9999', 'http://118.212.104.138:9999',
           'http://182.149.83.97:9999', 'http://106.42.163.100:9999', 'http://120.83.107.69:9999',
           'http://60.13.42.135:9999', 'http://60.205.188.24:3128', 'http://113.195.232.23:9999',
           'http://59.62.36.74:9000', 'http://218.2.226.42:80']
proxy = {'http': random.choice(proxies)}

# print('a标签类型是：', type(soup.a))   # 查看a标签的类型
# print('第一个a标签的属性是：', soup.a.attrs)  # 获取a标签的所有属性(注意到格式是字典)
# print('a标签属性的类型是：', type(soup.a.attrs))  # 查看a标签属性的类型
# #print('a标签的class属性是：', soup.a.attrs['class'])   # 因为是字典，通过字典的方式获取a标签的class属性
# print('a标签的href属性是：', soup.a.attrs['href'])   # 同样，通过字典的方式获取a标签的href属性

# Fetch the chapter-index page through the chosen proxy and print the
# href of every chapter link found under the '#list > dl' element.
response = requests.get(url, proxies=proxy, headers=header)
response.encoding = 'utf-8'  # force UTF-8 decoding to fix garbled Chinese text (mojibake)
soup = BeautifulSoup(response.text, 'lxml')

# select() returns a list-like ResultSet; [0] extracts the single
# matching <dl> as a bs4 Tag so we can walk its children.
meaning = soup.select('#list > dl')[0]

for child in meaning.children:
    # .children also yields NavigableString whitespace nodes between the
    # element rows; keep only real Tag elements (same class as `meaning`).
    if isinstance(child, type(meaning)):
        link = child.a
        # Guard: some rows (e.g. <dt> section headings) contain no <a>,
        # and `child.a` is then None — the original crashed here.
        if link is not None:
            print(link.get('href'))
