# -*- coding:utf-8 -*-
# @Time  : 2019/11/21 19:16
# @Author: wangxb
import json
import os
import urllib

import requests
from bs4 import BeautifulSoup


def load_url(url):
    """Fetch *url* through the configured HTTP proxy and return the raw body.

    Bug fix: the original called ``urllib.FancyURLopener``, which does not
    exist on the ``urllib`` package in Python 3 (the class lived in
    ``urllib.request``, was deprecated, and has since been removed), so the
    function raised ``AttributeError`` before making any request. Rewritten
    on top of ``requests`` (already imported by this file) with the same
    proxy mapping and the same bytes return value.

    :param url: absolute URL to fetch.
    :return: response body as ``bytes`` (matches the old ``src.read()``).
    :raises requests.RequestException: on connection/proxy failure.
    """
    proxies = {'http': 'zz.lltx.info:2015'}
    response = requests.get(url, proxies=proxies)
    return response.content


def htmlParser(url):
    """Fetch *url* with a desktop-IE User-Agent and return every
    ``<div class="timeout hide">`` element found in the page.

    :param url: page to fetch and parse.
    :return: list of bs4 ``Tag`` objects (empty list when none match).
    """
    ua_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"
    }
    page = requests.get(url, headers=ua_headers).text
    parsed = BeautifulSoup(page, 'html.parser')
    return parsed.find_all('div', "timeout hide")


def jsonPaser(apkUrl):
    """Parse an app-listing endpoint of the Tencent MyApp (应用宝) market.

    Fetches *apkUrl* with a Chrome User-Agent, decodes the JSON response,
    and returns its ``'obj'`` payload (the list of app records).

    :param apkUrl: app-list API URL, e.g. the myapp ``appList.htm`` endpoint.
    :return: the decoded value under the top-level ``'obj'`` key.
    """
    ua_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
    }
    raw = requests.get(apkUrl, headers=ua_headers).text
    return json.loads(raw)['obj']


def downloadAPK(apkUrl, localPKGURI):
    """Stream-download the APK at *apkUrl* to the local path *localPKGURI*.

    Fixes over the original:
    - the streamed response was never closed: with ``stream=True`` the
      connection stays checked out until garbage collection, so we now use
      the response as a context manager;
    - no HTTP status check, so a 404/5xx HTML error page was silently
      written to disk as an ".apk" — ``raise_for_status()`` surfaces that.

    :param apkUrl: direct download URL of the APK.
    :param localPKGURI: destination file path; overwritten if it exists.
    :raises requests.HTTPError: when the server answers with an error status.
    """
    with requests.get(apkUrl, stream=True) as r:
        r.raise_for_status()  # do not save an error page as an APK
        with open(localPKGURI, "wb") as apk:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    apk.write(chunk)


if __name__ == '__main__':
    # Baidu search (inanchor query, URL-encoded Chinese keywords for
    # Anhui/Hefei media & event-planning companies, 2019); the matching
    # <div class="timeout hide"> elements are parsed out and printed.
    target = 'https://www.baidu.com/s?wd=inanchor%3A%E5%AE%89%E5%BE%BD%20%E5%90%88%E8%82%A5%20%E4%BC%A0%E5%AA%92%20%E7%AD%96%E5%88%92%20%E6%B4%BB%E5%8A%A8%20%E5%B9%B4%E4%BC%9A%20%E4%BC%9A%E5%B1%95%20%E5%85%AC%E5%8F%B8%202019&rsv_spt=1&rsv_iqid=0x8101d749001c35e7&issp=1&f=8&rsv_bp=1&rsv_idx=2&ie=utf-8&tn=baiduhome_pg&rsv_enter=0&rsv_dl=tb&rsv_sug3=2&rsv_n=2&inputT=1736&rsv_sug4=1890'
    matches = htmlParser(target)
    print(matches)
