# python学习 至此开始
# 学习日期: 2023/7/11 19:03
# 学习者: 任磊

# 导入所需的库
import requests
import json
from bs4 import BeautifulSoup
from PIL import Image
import re
import xlwt

# 1 Baidu Translate crawler (interactive word lookup via the /sug endpoint;
#    the implementation below is currently commented out / disabled)
print("------------------------百度翻译爬虫----------------------")
# url1 = "https://fanyi.baidu.com/sug"
# head = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
#         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 SLBrowser/8.0.1.5162 SLBChan/109"}
# while True:
#     inner_word = input('请输入要翻译的单词:')
#     data = {"kw": inner_word}
#     return_origin = requests.post(url=url1, data=data, headers=head)
#     if return_origin.ok:
#         dic_obj = return_origin.json()
#         out = dic_obj['data'][0]['v']
#         print(f"该单词的翻译为:{out}")
#     flag1 = input("是否继续翻译？(输入 1 以继续)")
#     if flag1 == "1":
#         pass
#     else:
#         print("翻译结束！")
#         break


# 2 Bing Translator crawler (POSTs en -> zh-CHS translation requests;
#    the implementation below is currently commented out / disabled)
print("------------------------必应翻译爬虫----------------------")
# url1 = "https://cn.bing.com/translator"
# head1 = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 SLBrowser/8.0.1.5162 SLBChan/109",  # 伪装请求头
#          "accept": "*/*"}
# while True:
#     inner_word = input('请输入要翻译的单词:')
#     data1 = {"fromLang": "en",  # 设置请求数据
#              "text": inner_word,
#              "to": "zh-CHS",
#              }
#     return_origin = requests.post(url=url1, data=data1, headers=head1)  # 发送请求
#     if return_origin.ok:  # 判断返回状态是否正常
#         # out = BeautifulSoup(return_origin.text, "html.parser")
#         # out_word = out.findAll("div", attrs={"class": "tta_outtxt"})
#         # print(out_word)
#
#         print(return_origin.json())
#     else:
#         print("翻译失败！")
#     flag1 = input("是否继续翻译？(输入 1 以继续):")
#     if flag1 == "1":
#         pass
#     else:
#         print("翻译结束！")
#         break


# 3 Baidu image search crawler: fetch the results page for
#    "North China University of Technology" (query is URL-encoded in `word=`)
#    and list the text of every <div class="imgbox-border"> element.
print("------------------百度 北方工大 图片爬取-------------------------------")
url2 = "https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=detail&fr=&hs=0&xthttps=111110&sf=1&fmq=1689075676637_R&pv=&ic=0&nc=1&z=&se=&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=%E5%8C%97%E6%96%B9%E5%B7%A5%E4%B8%9A%E5%A4%A7%E5%AD%A6&oq=%E5%8C%97%E6%96%B9%E5%B7%A5%E4%B8%9A%E5%A4%A7%E5%AD%A6&rsp=-1"
# Browser-like User-Agent so Baidu serves the normal page instead of blocking the bot.
head2 = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 SLBrowser/8.0.1.5162 SLBChan/109",
         }
try:
    # timeout=10 prevents the whole script from hanging forever on a stalled
    # connection (the original call had no timeout at all).
    return_origin2 = requests.get(url=url2, headers=head2, timeout=10)
except requests.RequestException:
    # Network failure / timeout: report instead of crashing the script.
    print("获取失败！")
else:
    if return_origin2.ok:
        # Force UTF-8 before touching .text so Chinese content is not garbled.
        return_origin2.encoding = "utf-8"
        soup = BeautifulSoup(return_origin2.text, "html.parser")
        # NOTE(review): Baidu image results appear to be rendered client-side by
        # JavaScript, so this class may not exist in the raw HTML — verify that
        # the selector actually matches; an empty result here is likely that.
        photo_content = soup.findAll("div", attrs={"class": "imgbox-border"})
        for photo in photo_content:
            print(photo.text)
    else:
        print("获取失败！")


# 4 Lianjia second-hand housing crawler (scrapes 5 listing pages and writes
#    name/layout/area/orientation/floor/price columns to an .xls file via xlwt;
#    the implementation below is currently commented out / disabled)
print("---------------------爬取链家二手房信息-----------------------")
# head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 SLBrowser/8.0.1.5162 SLBChan/109"}  # 伪装请求
# book = xlwt.Workbook(encoding='utf-8', style_compression=0)  # 创建文件
# sheet = book.add_sheet('Lianjia', cell_overwrite_ok=True)  # 添加 Lianjia 为文件名
# # col = ('小区名', '户型', '面积', '朝向', '楼层', '总价', '单价')
# sheet.write(0, 0, '小区名')  # 为文件加上初始名
# sheet.write(0, 1, '户型')
# sheet.write(0, 2, '面积(单位平方米)')
# sheet.write(0, 3, '朝向')
# sheet.write(0, 4, '楼层')
# sheet.write(0, 5, '总价(单位万)')
# sheet.write(0, 6, '单价(单位元/平方米)')
# for i in range(1, 6):  # 爬取 5 页资料
#     h = (i - 1) * 30 + 1  # 每页有 30 个元数据
#     l = 0
#     if i == 1:  # 为地址添加变量以实现连续爬取
#         url = "https://bj.lianjia.com/ershoufang/pg/"
#     else:
#         url = "https://bj.lianjia.com/ershoufang/pg" + str(i) + "/"
#     return_data = requests.get(url, head)
#     if return_data.ok:  # 判断返回状态
#         return_data.encoding = "utf-8"  # 重编码防止乱码出现
#         soup = BeautifulSoup(return_data.text, "html.parser")  # 将获得的数据进行分析
#         content = soup.findAll("div", attrs={"class": "info clear"})  # 查找目标数据
#         for data in content:  # 遍历目标数据
#             # house_name = data.findAllNext("a", attrs={"data-el": "region"})  # 小区名
#             # house_type = data.findAllNext("div", attrs={"class": "houseInfo"})  # 户型，面积，朝向，楼层
#             # house_all_price = data.findAllNext("div", attrs={"class": "totalPrice totalPrice2"})  # 总价格
#             # house_single_price = data.findAllNext("div", attrs={"class": "unitPrice"})  # 每平方单价
#             house_content = data.get_text()  # 获得文件的纯文本格式，为后面正则判断作准备
#             # print(house_content)
#             # print("小区名:", re.findall(r"(?<=必看好房)\S{4,}\b", house_content))  # 小区名
#             house_name = re.findall(r"(?<=必看好房).*-", house_content)  # 获得小区名
#             sheet.write(h, l, house_name)
#             # print(house_name)
#             l += 1  # 换列
#             # print(re.findall(r"\d室\d厅", house_content))  # 户型
#             house_type = re.findall(r"\d室\d厅", house_content)  # 获得房屋户型
#             sheet.write(h, l, house_type[0])
#             l += 1
#             # print(re.findall(r"\d+\D?\d+平米", house_content))  # 面积
#             house_area = re.findall(r"(\d+(.\d+)?)平米", house_content)  # 获得房屋面积
#             house_area2 = house_area[0]
#             sheet.write(h, l, house_area2[0])
#             l += 1
#             # print(re.findall(r"(?<=平米.\S).\S.", house_content))  # 朝向
#             house_toward = re.findall(r"(?<=平米.\S).\S.", house_content)  # 获得房屋朝向
#             sheet.write(h, l, house_toward)
#             l += 1
#             # print(re.findall(r"(?<=装\s\S\s).........", house_content))  # 楼层
#             if re.findall(r"(?<=装\s\S\s).........", house_content):  # 获得房屋楼层
#                 house_num = re.findall(r"(?<=装\s\S\s).........", house_content)
#             else:
#                 if re.findall(r"(?<=其他\s\S\s).........", house_content):
#                     house_num = re.findall(r"(?<=其他\s\S\s).........", house_content)
#                 else:
#                     house_num = re.findall(r"(?<=毛坯\s\S\s).........", house_content)
#             sheet.write(h, l, house_num)
#             l += 1
#             if not re.findall(r"(?<=本满五年.)\d+", house_content):  # 总价
#                 # print("总价:", re.findall(r"(?<=随时看房.)\d+", house_content))
#                 house_all_price = re.findall(r"(?<=随时看房.)\d+", house_content)  # 获得房屋总价
#             else:
#                 # print("总价:", re.findall(r"(?<=本满五年.)\d+", house_content))
#                 house_all_price = re.findall(r"(?<=本满五年.)\d+", house_content)
#             sheet.write(h, l, house_all_price)
#             l += 1
#             # print("单价:", re.findall(r"(?<=万)\d+,*\d+", house_content))  # 单价
#             house_single_price = re.findall(r"(?<=万)\d+,*\d+", house_content)  # 获得房屋每平方米单价
#             sheet.write(h, l, house_single_price)
#             l = 0
#             h += 1  # 换行
# savepath = r'C:\Users\LENOVO\Desktop\Lianjia.xls'  # 设置保存文件的路径
# book.save(savepath)  # 保存文件到目标路径


# 5 QS World University Rankings crawler (walks nested ranking-row divs on
#    topuniversities.com; the implementation below is commented out / disabled)
print('---------------爬取QS大学排名-----------------')
# url4 = "https://www.topuniversities.com/university-rankings/world-university-rankings/2024"
# head4 = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 SLBrowser/8.0.1.5162 SLBChan/109"}
# return_data5 = requests.get(url4, head4)
# if return_data5.ok:
#     return_data5.encoding = 'utf-8'
#     soup = BeautifulSoup(return_data5.text, "html.parser")
#     university_content = soup.findAll("div", attrs={"class": "_qs-ranking-data-row normal-row api-ranking-css normal-row"})
#     for university_data1 in university_content:
#         university_data2 = university_data1.findAll("div", attrs={"class": "crow"})
#         for university_name in university_data2:
#             name_content = university_name.findAllNext("div", attrs={"class": "col-lg-4 col-md-7 col-ms-7col-7"})
#             print('ok1')
#             for link_0_name in name_content:
#                 link_1_name = link_0_name.findAllNext("div", attrs={"class": "university-rank-row"})
#                 print("ok2")
#                 for link_2_name in link_1_name:
#                     link_3_name = link_2_name.findAllNext("div", attrs={"class": "univ-detail-right-pos"})
#                     print("ok3")
#                     for link_4_name in link_3_name:
#                         name = link_4_name.findAllNext("div", attrs={"class": "td-wrap"})
#                         print("ok4")
# else:
#     print("爬取失败")
