# -*- coding:utf-8 -*-
# @Time     : 2021/4/6 9:01

import requests
import random
import re
from pymongo import MongoClient

# Suppress urllib3 certificate warnings (all requests below use verify=False).
requests.packages.urllib3.disable_warnings()
# Proxy mode switch: "jt" = static proxy list below, "dt" = dynamic proxies from MongoDB.
ip = "jt"
client = MongoClient('mongodb://root:MgXwM3Toe5@192.168.7.53:30228')
# Collection of dynamically harvested proxies; documents carry an "ip" field
# holding the proxy URL (see get_ip_dt).
proxies = client["proxy"]["proxies_ip_dongtai"]
proxies_list = [  # static proxy list (authenticated HTTP proxies)
    "http://pc0308wd:pc0308wd@140.246.91.53:888",
    "http://pc0308wd:pc0308wd@113.125.125.2:888",
    "http://pc0308wd:pc0308wd@203.57.232.249:888",
    "http://pc0308wd:pc0308wd@113.125.9.8:888",
    "http://pc0308wd:pc0308wd@42.123.125.53:888",
]


def get_ua():
    """Build a randomized desktop-Chrome User-Agent string.

    The Chrome major/build/patch numbers and the OS token are picked at
    random so repeated requests do not present an identical fingerprint.
    """
    major = random.randint(55, 62)
    build = random.randint(0, 3200)
    patch = random.randint(0, 140)
    platforms = [
        '(Windows NT 6.1; WOW64)', '(Windows NT 10.0; WOW64)', '(X11; Linux x86_64)',
        '(Macintosh; Intel Mac OS X 10_12_6)'
    ]
    tokens = [
        'Mozilla/5.0',
        random.choice(platforms),
        'AppleWebKit/537.36',
        '(KHTML, like Gecko)',
        'Chrome/{}.0.{}.{}'.format(major, build, patch),
        'Safari/537.36',
    ]
    return ' '.join(tokens)


# Shared request headers; the User-Agent is randomized once at import time
# and then reused for every request in this module.
headers = {
    "User-Agent": get_ua()
}


# Use a dynamic proxy harvested into MongoDB
def get_ip_dt():
    """Pick a random dynamic proxy from the MongoDB collection.

    Returns a proxies mapping for `requests`. Both the "http" and "https"
    schemes point at the chosen proxy — the original only mapped "http",
    so requests to https:// URLs (the Baidu Wenku targets used in this
    script) silently bypassed the proxy.
    """
    items = list(proxies.find({}))
    item = random.choice(items)
    proxy_url = item.get("ip")
    # Map both schemes so HTTPS traffic is proxied too.
    new_proxy = {
        "http": proxy_url,
        "https": proxy_url,
    }
    # print(f"已设置代理{proxy_url}")
    return new_proxy


# Use a static proxy from the hard-coded list
def get_ip_jt():
    """Pick a random proxy from the static `proxies_list`.

    Returns a proxies mapping for `requests`. Both the "http" and "https"
    schemes are mapped — the original only set "http", so https:// URLs
    were fetched directly, bypassing the proxy.
    """
    proxy_url = random.choice(proxies_list)
    # Map both schemes so HTTPS traffic is proxied too.
    new_proxy = {
        "http": proxy_url,
        "https": proxy_url,
    }
    # print(f"已设置代理{proxy_url}")
    return new_proxy


# Choose between the static ("jt") and dynamic ("dt") proxy source
def get_ip():
    """Return a proxies mapping according to the module-level `ip` flag.

    Returns None when the flag is neither "jt" nor "dt", matching the
    original implicit fall-through.
    """
    dispatch = {"jt": get_ip_jt, "dt": get_ip_dt}
    picker = dispatch.get(ip)
    return picker() if picker is not None else None


def parseUrl(url):
    """GET `url` via a random proxy with the shared headers.

    Returns the `requests.Response` on success, or None on any failure
    (connection error, timeout, bad proxy, ...). TLS verification is
    disabled, matching the warning suppression at module import.
    """
    try:
        # Without a timeout a dead proxy hangs the crawl forever;
        # 30s bounds each attempt while staying generous for slow proxies.
        resp = requests.get(url, headers=headers, proxies=get_ip(),
                            verify=False, timeout=30)
        return resp
    except Exception as e:
        # Best-effort fetch: log and signal failure with None so callers
        # can skip the URL instead of crashing.
        print(e, "--> parseUrl")
        return None


def parsePdf(url):
    """Fetch a Baidu Wenku document page, follow its embedded page-JSON
    URLs, and print the reassembled document text.

    Prints the number of page URLs found and then the extracted text.
    Prints nothing if the initial fetch fails or returns an error status.
    """
    resp = parseUrl(url)
    if resp:
        html = resp.text
        # The page embeds JSON with backslash-escaped slashes; normalize
        # them to plain '/' before extracting URLs.
        html = html.replace(r'\\/\\/', '/')
        html = html.replace(r'\\/', '/')
        # NOTE(review): '.' and '?' are unescaped here, so the pattern is
        # looser than "0.json?" — it works on current pages but should
        # probably be r'0\.json\?'. Left unchanged to avoid altering matches.
        comp = re.compile(r'\\"(https:.*?0.json?.*?)\\"}')
        urls = comp.findall(html)
        print(len(urls))

        result = ""
        for page_url in urls:  # renamed: the original shadowed the `url` parameter
            page_url = page_url.replace("/w", "//w")
            page_resp = parseUrl(page_url)
            if page_resp is None:
                # parseUrl returns None on failure; the original crashed
                # here with AttributeError on .content. Skip the page.
                continue

            # Decode unicode escapes so Chinese text is readable.
            text = page_resp.content.decode("unicode-escape")
            # group 1: text fragment, group 2: y coordinate — a change in
            # y marks the start of a new line.
            datas = re.findall(r'"c":"(.*?)".*?"y":(.*?),', text)

            y = None
            for data in datas:
                if y != data[1]:
                    y = data[1]
                    result += '\n'
                result += data[0]

        print(result)


def main():
    """Entry point: extract and print the text of one Baidu Wenku document."""
    target = "https://wenku.baidu.com/view/5fd3a55b3d1ec5da50e2524de518964bcf84d2b6"
    parsePdf(target)


if __name__ == '__main__':
    main()
