import collections
from bs4 import BeautifulSoup
import requests
import pymongo

# url = 'http://47.103.13.124:8001/base'
# The link below really does block plain cookie-based logins — what exactly is it checking?
# How to get around this anti-crawling measure? — send headers={
#   'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0'
# }
# Database section
# --- Storage setup: MongoDB collection that will hold the scraped records ---
datas = []  # accumulates one dict per movie card
client = pymongo.MongoClient(host='localhost', port=27017)
db = client.crawler
collection = db.User_Agent

# --- Request configuration ---
url = 'http://47.103.13.124:8001/user_agent'

# Session cookie captured from a logged-in browser session.
cookies = {
    'session': '.eJyrViotTi1SsqpWyiyOT0zJzcxTsjLQUcrJTwexSopKU3WUcvOTMnNSlayUDM3gQEkHrDE-M0XJyhjCzkvMBSmKKTVNMjMDkiamFkq1tQDfeR3n.YL-Igw.Hr56XXXCQz93GhMOfyGQM6OvUYE',
}

# The site rejects cookie-only logins unless a browser-like User-Agent
# accompanies the request, so we always send one.
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
}
# text = requests.get(url,cookies=cookies)
# Fetch the page. Both the session cookie AND a browser User-Agent are
# required: the server rejects cookie-only requests (its anti-crawl check).
# A timeout is set so a dead/slow server cannot hang the script forever
# (requests has no default timeout).
text = requests.get(url, cookies=cookies, headers=header, timeout=10)
# Fail fast on HTTP errors rather than silently parsing an error page.
text.raise_for_status()

# Parse the HTML body (note: pass .text, not the Response object itself).
r = BeautifulSoup(text.text, 'lxml')
# Locate the container of movie cards; guard against a layout change so we
# get a clear error instead of an AttributeError on None.
movie_list = r.find('div', {'class': 'movie-list'})
if movie_list is None:
    raise RuntimeError('page layout changed: no <div class="movie-list"> found')

a = movie_list.find_all('a')
for i in a:
    # Each card carries: poster <img>, title <h5>, blurb <p>, and two
    # <small> tags (rating, verdict). Fetch the <small> tags once instead
    # of calling find()/find_all() twice.
    smalls = i.find_all('small')
    img_tag = i.find('img')
    datas.append({
        'img': img_tag.attrs.get('src') if img_tag is not None else None,  # poster URL
        'title': i.find('h5').text,    # movie title
        'jianShu': i.find('p').text,   # short description
        'pingFen': smalls[0].text,     # rating (first <small>)
        # verdict (second <small>); guard against cards with only one tag
        # so a single malformed card cannot crash the whole run
        'jieLun': smalls[1].text if len(smalls) > 1 else '',
    })

# Persist in a single batch insert (left disabled, as in the original).
# collection.insert_many(datas)
print('success')


















