# -*- coding: utf-8 -*-
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#作者：cacho_37967865
#博客：https://blog.csdn.net/sinat_37967865
#文件：getWebTxt.py
#日期：2019-07-07
#备注：temp
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

import requests
import random
from bs4 import BeautifulSoup
from pacong.txtdeal.base import deal_replace
from pycacho.cachobase.logger import Logger
import re

logger = Logger("temp").get_log()

# Pool of browser User-Agent strings; one is chosen at random per run.
# BUG FIX: the original strings had ",'Accept-Language':'zh-CN,zh;q=0.9'"
# concatenated onto the User-Agent value itself, so servers received a
# malformed UA and no Accept-Language header. The language preference now
# lives in its own header key below.
agent = [
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
]

headers = {
    'User-Agent': random.choice(agent),
    'Accept-Language': 'zh-CN,zh;q=0.9',
}

# When requests is called with verify=False (to skip SSL certificate
# verification), urllib3 emits an InsecureRequestWarning on every request
# and floods the log; the lines below suppress those warnings and set
# basic connection behaviour.
session = requests.Session()
session.keep_alive = False  # NOTE(review): not a documented requests.Session attribute — likely has no effect; confirm intent
requests.adapters.DEFAULT_RETRIES = 5
requests.packages.urllib3.disable_warnings()

def temp():
    """Fetch one chapter page from dizishu.com, extract the obfuscated
    chapter text embedded in the page's inline JavaScript, clean it up
    and print the result.

    The page assigns the chapter body to a JS variable (``cctxt='...';``)
    and then applies a series of ``cctxt.replace(...)`` calls; both pieces
    are extracted with regexes and handed to ``deal_replace``.

    Returns None; prints the cleaned text (or logs an error and returns
    early if the expected markers are not found in the page).
    """
    url = 'https://www.dizishu.com/files/article/html555/83/83407/36930988.html'
    # Reuse the module-level session so the connection settings configured
    # at import time apply to this request as well.
    resp = session.get(url, headers=headers, timeout=60, verify=False)
    resp.encoding = 'gbk'  # the site serves GBK-encoded pages
    html = resp.text
    # BUG FIX: in the original pattern r'cctxt.replace(.*);' the '.', '('
    # and ')' were unescaped, so the parentheses formed a regex group
    # instead of matching literal text and the '.' matched any character —
    # the replace() arguments were never captured as intended. Escape the
    # metacharacters and make the capture non-greedy so each call matches
    # separately.
    cctxt = re.findall(r"cctxt\.replace\((.*?)\);", html)
    matches = re.findall(r"cctxt='(.*)';", html)
    if not matches:
        # Guard against page-layout changes instead of raising IndexError.
        logger.error('chapter text marker not found in %s', url)
        return
    info = matches[0].replace('<br /><br />', '\n').replace('<br><br>', '\n')
    # "lxml" drops data on this markup, so parse with "html5lib".
    soup = BeautifulSoup(info, 'html5lib').text
    tt = deal_replace(soup, cctxt)
    print(tt)

# Script entry point: run the one-off scrape when executed directly.
if __name__ == '__main__':
    temp()