#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：PythonData 
@File    ：case1_Get.py
@Author  ：朱志文
@Date    ：2021/12/27 8:31 
'''

# import requests,json        #导入requests包
# url = 'http://www.cntour.cn/'
# strhtml = requests.get(url)        #Get方式获取网页数据
# # 注意观察url地址，它已经将参数拼接起来
# print('URL地址：', strhtml.url)
# # 响应状态码，成功返回200，失败40x或50x
# print('请求状态码：', strhtml.status_code)
# print('header信息:', strhtml.headers)
# print('cookie信息：', strhtml.cookies)
# print('响应的数据：', strhtml.text)
# # 如响应是json数据 ，可以使用 r.json()自动转换为dict
# # print('响应json数据', strhtml.json())
#
#
# def getHtmlText(url):
#     try:
#         res=requests.get(url)
#         res.raise_for_status()  #如果不是200 则产生异常
#         res.encoding=res.apparent_encoding
#         return res.text
#     except:#捕获异常
#         print('网络链接失败')
# print(getHtmlText(url))


'''京东'''
# import requests
# url = "https://item.jd.com/100018792490.html"
# headers = {"User-Agent": "chrome/5.0"}
# try:
#     r = requests.get(url, headers=headers)  #因为京东有user-agent限制所以要加入头部信息
#     r.raise_for_status()
#     print(r.status_code)
#     r.encoding = r.apparent_encoding
#     print(r.text[:1001])
# except:
#     print("爬取异常")


'''亚马逊'''
# import requests
# def getHTMLText(url):
#     try:
#         headers={'user-agent':'Mozilla/5.0'}
#         r=requests.get(url,headers=headers)
#         r.raise_for_status()#如果状态不是200，引发HTTPError异常
#         r.encoding=r.apparent_encoding
#         return r.text
#     except:
#         return '产生异常'
#
# if __name__ == "__main__":
#     url = "https://www.amazon.cn/dp/B08LNN7YRR/ref=s9_acsd_al_bw_c2_x_3_t?pf_rd_m=A1U5RCOVU0NYF2&pf_rd_s=merchandised-search-12&pf_rd_r=PRRR9JV5JSPJ7ZSV8ZSR&pf_rd_t=101&pf_rd_p=47d50545-7f7a-4514-bc0e-6247610beb4f&pf_rd_i=116169071"
#     print(getHTMLText(url))

'''百度 360 搜索'''
import requests
def getHTMlSearch(url, keyword):
    """Fetch a search-results page for *keyword* and print diagnostics.

    Sends a GET request to *url* with the search term attached as the
    ``q`` query parameter, then prints the final request URL, the length
    of the response body, and the body itself.

    :param url: base search URL (may already contain query parameters)
    :param keyword: search term, sent as the ``q`` parameter
    :returns: None — output goes to stdout; failures print an error message
    """
    try:
        # Many search engines reject requests with the default
        # python-requests User-Agent, so spoof a browser UA.
        headers = {'User-Agent': 'Mozilla/5.0'}
        key = {'q': keyword}
        res = requests.get(url, params=key, headers=headers)
        res.raise_for_status()  # raise HTTPError for 4xx/5xx status codes
        # Use the encoding guessed from the content, not the header default.
        res.encoding = res.apparent_encoding
        print(res.request.url)
        print(len(res.text))
        print(res.text)
    except requests.RequestException:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work;
        # only network/HTTP errors are treated as a scrape failure.
        print("爬取失败")
# Driver: run a sample search against so.com (360 search).
# NOTE(review): the original URL ended with a stray '}' that was sent as
# part of the 'src' parameter value; removed as an apparent typo.
url = 'https://www.so.com/s?ie=utf-8&fr=none&src=360sou_newhome'
keyword = '刘德华'
getHTMlSearch(url, keyword)

# import requests,os
# '''网络图片的爬取和存储'''
# def getNetPict():
#     url='https://w.wallhaven.cc/full/g7/wallhaven-g71wr7.jpg'
#     root='D://Download//'
#     path=root+url.split('/')[-1]
#     try:
#         if not os.path.exists(root):#判断文件夹是否存在
#             os.mkdir(root) #创建文件夹
#         if not os.path.exists(path): #判断文件是否存在
#             r=requests.get(url)
#             with open(path,'wb') as f:
#                 f.write(r.content)#写入文件
#                 f.close()#关闭
#                 print("文件保存成功");
#         else:
#             print('文件已存在')
#     except:
#         print('爬取')
#
# getNetPict()

'''IP地址归属地查询'''
# import requests
# def selectIP(url,ip):
#     keyword={'ip':ip}
#     try:
#         res=requests.get(url,params=keyword)
#         res.raise_for_status()
#         res.encoding=res.apparent_encoding
#         print(res.text)
#     except:
#         print('爬取失败')
# url = "http://m.ip138.com/ip.asp?"
# ip = "125.220.159.160"
# selectIP(url, ip)
