# -*- coding:utf-8 -*-
# @Time     : 2021/4/6 11:03

import requests
import random
import re
from pymongo import MongoClient

requests.packages.urllib3.disable_warnings()

ip = "dt"
client = MongoClient('mongodb://root:MgXwM3Toe5@192.168.7.53:30228')
proxies = client["proxy"]["proxies_ip_dongtai"]
proxies_list = [  # 静态代理列表
    "http://pc0308wd:pc0308wd@140.246.91.53:888",
    "http://pc0308wd:pc0308wd@113.125.125.2:888",
    "http://pc0308wd:pc0308wd@203.57.232.249:888",
    "http://pc0308wd:pc0308wd@113.125.9.8:888",
    "http://pc0308wd:pc0308wd@42.123.125.53:888",
]


def get_ua():
    """Build a randomized desktop Chrome User-Agent string.

    The Chrome version components are drawn from fixed ranges (major 55-62)
    and the platform token is picked from a small pool of desktop OS strings.
    """
    major = random.randint(55, 62)
    build = random.randint(0, 3200)
    patch = random.randint(0, 140)
    platforms = [
        '(Windows NT 6.1; WOW64)', '(Windows NT 10.0; WOW64)', '(X11; Linux x86_64)',
        '(Macintosh; Intel Mac OS X 10_12_6)'
    ]
    chrome_token = f'Chrome/{major}.0.{build}.{patch}'
    return ' '.join((
        'Mozilla/5.0',
        random.choice(platforms),
        'AppleWebKit/537.36',
        '(KHTML, like Gecko)',
        chrome_token,
        'Safari/537.36',
    ))


# Default request headers shared by all fetches in parseUrl.
# NOTE: get_ua() is evaluated once at import time, so every request in a run
# sends the same User-Agent; call get_ua() per request if rotation is desired.
headers = {
    "User-Agent": get_ua()
}


# 使用动态代理
def get_ip_dt():
    items = list(proxies.find({}))
    item = random.choice(items)
    ip = item.get("ip")
    new_proxy = {
        "http": ip,
    }
    # print(f"已设置代理{ip}")
    return new_proxy


# 使用静态代理
def get_ip_jt():
    ip = random.choice(proxies_list)
    new_proxy = {
        "http": ip,
    }
    # print(f"已设置代理{ip}")
    return new_proxy


# Choose between the static and the dynamic proxy source
def get_ip():
    """Dispatch on the module-level `ip` mode flag.

    "jt" -> static list, "dt" -> MongoDB; any other value yields None
    (requests then runs without a proxy), matching the original behavior.
    """
    selectors = {"jt": get_ip_jt, "dt": get_ip_dt}
    selector = selectors.get(ip)
    if selector is not None:
        return selector()


def parseUrl(url):
    """GET `url` through a randomly selected proxy.

    Returns the requests.Response on success, or None on any network-level
    failure (logged to stdout). TLS verification is disabled because the
    proxies use self-signed/intercepting certs — see the urllib3 warning
    suppression at the top of the file.
    """
    try:
        # Fix: a missing timeout lets requests block forever on a dead proxy.
        resp = requests.get(url, headers=headers, proxies=get_ip(),
                            verify=False, timeout=30)
        return resp
    except requests.RequestException as e:
        # Narrowed from bare Exception: only swallow network/HTTP errors,
        # let programming errors propagate.
        print(e, "--> parseUrl")
        return None


def parseDoc(url):
    """Fetch a Baidu Wenku view page, follow its content-JSON URLs, and
    print the reassembled document text.

    The page embeds escaped JSON URLs; each JSON chunk carries ("c", "y")
    pairs where "c" is a text fragment and "y" is a vertical position —
    a change in "y" marks a new line.
    """
    resp = parseUrl(url)
    # Fix: parseUrl returns None on failure; resp.text would raise
    # AttributeError.
    if resp is None:
        return
    html = resp.text
    html = html.replace(r'\\/\\/', '/')
    html = html.replace(r'\\/', '/')
    comp = re.compile(r'\\"(https:.*?0.json?.*?)\\"}', re.S)
    urls = comp.findall(html)
    parts = []  # collected fragments; joined once to avoid quadratic +=
    for chunk_url in urls:  # renamed: no longer shadows the `url` parameter
        chunk_url = chunk_url.replace("/w", "//w")
        resp = parseUrl(chunk_url)
        if resp is None:  # skip chunks that failed to download
            continue
        # Decode escaped sequences to avoid mojibake in CJK text
        text = resp.content.decode("unicode-escape")
        y = None
        datas = re.findall(r'"c":"(.*?)".*?"y":(.*?),', text)
        for data in datas:
            # A change in the "y" value means a new line starts here
            if not y == data[1]:
                y = data[1]
                parts.append('\n')
            parts.append(data[0])
    print("".join(parts))


def main():
    """Entry point: scrape and print one hard-coded Wenku document."""
    doc_url = "https://wenku.baidu.com/view/e2e82e8b54270722192e453610661ed9ac5155d8"
    parseDoc(doc_url)


# Script entry point.
if __name__ == '__main__':
    main()
