#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：PythonData 
@File    ：case3_bs4.py
@Author  ：朱志文
@Date    ：2021/12/28 8:05 
'''
import bs4,pandas
from bs4 import BeautifulSoup
import requests

# f = open('demo.html')
# print(f.read())
# f.close()

# with open('demo.html', 'rb') as f:
#     print(f.read())

# #request 访问
# resp=requests.get(
#     url='https://www.python123.io/ws/demo.html',
#     headers={'User-Agent':'BaiduSpider'}
# )
# # 创建BeautifulSoup对象
# soup=bs4.BeautifulSoup(resp.text,'html.parser')
# #通过css选择器从页面中提取标签
# title_soup=soup.select('p.title>b')
# rank_soup=soup.select('p.course')
#
# for title_p, rank_p in zip(title_soup, rank_soup):
#         print(title_p.text)
#         print(rank_p.text)

'''遍历'''
# import requests
# from bs4 import BeautifulSoup
# resp=requests.get(
#     url='https://www.python123.io/ws/demo.html',
#     headers={'User-Agent':'BaiduSpider'}
# )
# '''上行遍历'''
# soup=bs4.BeautifulSoup(resp.text,'lxml')
# for x in soup.title.parents:
#     if x is soup.html:
#         print(x.name)
#     else:
#         print(x)


'''提取所有的超链接'''
# import requests
# import bs4
# resp=requests.get(
#     url='http://wlacm.com'
# )
# soup=bs4.BeautifulSoup(resp.text,'html.parser')
# n=1
# #find_all(name,attrs,recursive(对全部子孙进行检索),string,**kwargs)
# for x in soup.find_all('a'):
#     href=x.get('href')
#     if 'http' in href:
#         print(n,href)
#         n+=1

'''豆瓣排行榜'''
# for page in range(1,11):
#     resp=requests.get(
#         url=f'https://movie.douban.com/top250?start={(page-1)*25}&filter=',
#         headers={'User-Agent':'BaiduSpider'}
#     )
#
#     soup=bs4.BeautifulSoup(resp.text,'lxml')
#     title_span=soup.select('div.info>div.hd>a>span:nth-child(1)')
#     rank_spans = soup.select('div.info > div.bd > div > span.rating_num')
#     comment_span=soup.select('div.info > div.bd >p.quote>span.inq')
#     # title_span_list=[]
#     for title_span,rank_spans,comment_span in zip(title_span,rank_spans,comment_span):
#         print(title_span.text,rank_spans.text,comment_span.text)
#         # title_span_list.append(title_span.text)



'''
import pandas as pd
#任意的多组列表
a = ['1','2','3']
b = [4,5,6]
print(title_span_list)
#字典中的key值即为csv中列名
dataframe = pd.DataFrame({'a_name':title_span_list})

#将DataFrame存储为csv,index表示是否显示行名，default=True
dataframe.to_csv("test.csv",index=False,sep=',')'''

'''爬取网站图片'''
# import requests,bs4,os
# def getHTMlPict(url):
#     root='D://Download//'
#     path=root+url.split('/')[-1]
#     try:
#         if not os.path.exists(root):
#             os.mkdir(root)
#         if not os.path.exists(path):
#             resp=requests.get(url)
#             with open(path,'wb') as f:
#                 f.write(resp.content)
#                 print('保存成功')
#         else:
#             print("文件已经存在")
#     except:
#         print('爬取失败')
#
# try:
#     resp=requests.get(
#             url='http://www.zwu.edu.cn/24/list.htm'
#     )
#     soup=bs4.BeautifulSoup(resp.text,'lxml')
#     img_soup=soup.select('tbody>tr>td>p>a>img')
#     a_soup=soup.select('tbody>tr>td>p>a')
#     src_list=[]
#     for img_soup,a_soup in zip(img_soup,a_soup):
#         # print(a_soup.get('href'))
#         src_list.append('http://www.zwu.edu.cn/'+img_soup.get('src'))
#     for url in src_list:
#         print(url)
#         getHTMlPict(url)
# except:
#     print('error')


'''爬取中国大学排行'''
import requests,bs4

# Scrape the 2021 Best Chinese Universities Ranking (shanghairanking.cn)
# and print one line per university:
#   rank, Chinese name, English name, province/city, category, score, level.
resp=requests.get(
    url='https://www.shanghairanking.cn/rankings/bcur/2021',
    headers={'User-Agent':'BaiduSpider'}
)
resp.raise_for_status()  # abort early on HTTP 4xx/5xx
resp.encoding=resp.apparent_encoding  # use detected encoding so Chinese text is not garbled
soup=bs4.BeautifulSoup(resp.text,'lxml')

# One CSS selector per table column; the lists line up row-for-row.
rank_divs=soup.select('tbody>tr>td>div.ranking')  # rank
name_divs=soup.select('tbody>tr>td.align-left>div.univname-container>div.univname>div:nth-child(1)>div.tooltip>div.link-container>a')  # Chinese name
name_english_divs=soup.select('tbody>tr>td.align-left>div.univname-container>div.univname>div:nth-child(2)>div.tooltip>div.link-container>a')  # English name
address_tds=soup.select('tbody>tr>td:nth-child(3)')  # province/city
gen_tds=soup.select('tbody>tr>td:nth-child(4)')  # category
score_tds=soup.select('tbody>tr>td:nth-child(5)')  # total score
level_tds=soup.select('tbody>tr>td:nth-child(6)')  # institution level

# Loop variables no longer shadow the result lists they iterate over.
for rank,name,name_en,address,gen,score,level in zip(
        rank_divs,name_divs,name_english_divs,address_tds,gen_tds,score_tds,level_tds):
    print(rank.text.strip(),name.text.strip(),name_en.text.strip(),
          address.text.strip(),gen.text.strip(),score.text.strip(),level.text.strip())


# from selenium import webdriver
# # 要想调用键盘按键操作需要引入keys包
# from selenium.webdriver.common.keys import Keys
# from selenium.webdriver import ActionChains
# #创建浏览器对象
# driver = webdriver.Chrome("D:\Download\chromedriver.exe")
#
# driver.get("http://www.shanghairanking.cn/rankings/bcur/2021")
#
# for i in range(20):
#
#     #鼠标移动到某处单击
#     action2 = driver.find_element_by_class_name("ant-pagination-next")
#     ActionChains(driver).move_to_element(action2).click(action2).perform()