#!/usr/bin/env python3
# Author: yyds_l (written in PyCharm)
# Project: python web crawler — fetch hot-site recommendations from hao123.com
# Created: 2022/03/27 12:01

"""okt"""


# Build the HTTP request object
def res_R(url="http://www.hao123.com"):
    """Build a urllib Request for *url* with a browser-like User-Agent.

    Args:
        url: Page to request. Defaults to the hao123 home page, so existing
             callers (which pass nothing) are unaffected.

    Returns:
        urllib.request.Request ready to be passed to urlopen().
    """
    # Desktop-Chrome User-Agent so the site serves its normal HTML page
    # instead of rejecting the default urllib client.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
                      ' AppleWebKit/537.36 (KHTML, like Gecko)'
                      ' Chrome/92.0.4515.159 Safari/537.36'
    }

    res = urllib.request.Request(url=url, headers=headers)
    return res


# Perform the request and return the decoded page
def data_open(res):
    """Open *res* (a urllib Request) and return the page body as a str.

    Args:
        res: urllib.request.Request, e.g. the one built by res_R().

    Returns:
        The response body decoded as UTF-8, or None when the request fails
        (the failure is reported on stdout, matching the script's style).
    """
    try:
        # Context manager guarantees the connection is closed on all paths;
        # the original leaked the response object.
        with urllib.request.urlopen(res) as html:
            time.sleep(2)  # polite delay so we don't hammer the server
            return html.read().decode('utf-8')
    except urllib.error.URLError:
        # HTTPError is a subclass of URLError, so this single handler covers
        # both cases the original handled — with the same message.
        print('可能无法执行，机器人正在学习...')


# Extract name/url/icon data from the downloaded HTML
def html_shuju(data):
    """Parse the hao123 page and return its hot-site records.

    Args:
        data: Raw HTML of the hao123 home page as a string.

    Returns:
        A list of dicts with keys "rank", "name", "url_open", "url_img",
        or None if parsing fails (the error is printed).
    """
    try:
        tree = etree.HTML(data)
        print('正在爬取hao123.com热门网站推荐数据----------------')
        print('\n', '-' * 20)
        time.sleep(3)
        # All three XPath queries target the same <a> nodes, so the three
        # result lists are positionally aligned.
        base = "//div//ul[@class='js_bd cls_bd']//a[@class='sitelink icon-site']"
        name = tree.xpath(base + "/text()")
        print('已获取name；', name)
        time.sleep(2)
        url = tree.xpath(base + "/@href")
        print('已获取url；', url)
        time.sleep(1)
        img = tree.xpath(base + "/@style")
        print('已获取img：', img)
        print('-' * 20)
        time.sleep(1)
        # The icon URL is embedded in a CSS style attribute such as
        # "background-image: url(http://...)". Extract it with a regex,
        # compiled once outside the loop.
        # BUGFIX: the original pattern used [a-zA-z]; the A-z range also
        # matches the characters [ \ ] ^ _ ` — corrected to [a-zA-Z].
        scheme_re = re.compile(r"[a-zA-Z]+://[^\s]*")
        data_json = []
        for rank, (site_name, site_url, style) in enumerate(zip(name, url, img), start=1):
            # Trim at the closing ')' of the CSS url(...) wrapper.
            img_url = scheme_re.findall(str(style))[0].split(')')[0]
            data_json.append({
                "rank": rank,
                'name': site_name,
                'url_open': site_url,
                'url_img': img_url,
            })
        return data_json
    except Exception as e:
        # Best-effort scraper: report and return None rather than crash.
        print(e)
        print('获取数据失败。')


# Persist the scraped data to a JSON file
def with_fp(data_json):
    """Write *data_json* to hao123.json in the current directory.

    Args:
        data_json: JSON-serializable list of site records.

    Prints a success banner on completion, or the error on failure.
    """
    filename = 'hao123.json'
    print('\n正在下载数据...')
    try:
        with open(filename, 'w', encoding='utf-8') as fp:
            # ensure_ascii=False keeps Chinese characters readable in the file
            # instead of \uXXXX escapes.
            json.dump(data_json, fp, ensure_ascii=False)
    except (OSError, TypeError) as err:
        # OSError: file cannot be written; TypeError: data not serializable.
        # Narrowed from the original bare `except Exception`.
        print(err)
        print('保存数据失败')
    else:
        print('\n\n\n', '-'*6, "数据下载成功", '-'*6)


# Script entry point: fetch -> parse -> save.
if __name__ == '__main__':
    # Imports live inside the guard in this script; executed here at module
    # scope they still bind the global names the functions above reference.
    import urllib.request
    import urllib.error
    from lxml import etree
    import json
    import time
    import re

    # Step 0: build the request with a browser-like User-Agent.
    request = res_R()

    # Step 1: download the page HTML.
    page = data_open(request)

    # Step 2: extract the hot-site records from the HTML.
    records = html_shuju(page)

    # Step 3: dump the records to hao123.json.
    with_fp(records)