"""Scrape movie entries (poster URL + quote) from a demo page and stage them
for insertion into MongoDB (database: crawler, collection: demo1)."""
import pymongo
import requests
from bs4 import BeautifulSoup

# NOTE: `from pymongo import collection` was removed — that name was
# immediately shadowed by the `collection = db.demo1` assignment below.

url = 'http://47.103.13.124:8001/base'
# NOTE(review): hard-coded session cookie — it will expire, and committing
# credentials to source is a security smell; prefer loading from env/config.
cookies = {
    'session': '.eJyrViotTi1SsqpWyiyOT0zJzcxTsjLQUcrJTwexSopKU3WUcvOTMnNSlayUDM3gQEkHrDE-M0XJyhjCzkvMBSmKKTVNMjMDkiamFkq1tQDfeR3n.YLt59w.idiFJx8yLHctKx80BVHVXZJXhtU'
}
# Database connection: creates database "crawler" and collection "demo1"
# lazily on first write.
client = pymongo.MongoClient(host="localhost", port=27017)
db = client.crawler
collection = db.demo1

# timeout keeps the script from hanging forever on an unresponsive server
response = requests.get(url, cookies=cookies, timeout=10)
# Fail loudly on HTTP errors instead of silently parsing an error page.
response.raise_for_status()

# Parse the HTML with the lxml parser.
soup = BeautifulSoup(response.text, 'lxml')
movie_list = soup.find('div', class_='movie-list')
if movie_list is None:
    # find() returns None when the element is absent (layout change or
    # expired session) — fail with a clear message, not an AttributeError.
    raise RuntimeError('expected <div class="movie-list"> not found on page')

datas = []
for movie in movie_list.find_all('a'):
    img_tag = movie.find('img')
    quote_tag = movie.find('p')
    if img_tag is None or quote_tag is None:
        continue  # skip malformed entries instead of crashing mid-loop
    datas.append({
        'img': img_tag.attrs.get('src'),   # poster image URL
        'taiCi': quote_tag.get_text(),     # movie quote text
    })

# Insert the scraped records into MongoDB (disabled in the original; if you
# re-enable it, keep the emptiness guard — insert_many raises on an empty list).
# if datas:
#     collection.insert_many(datas)
print('success')