# -*- coding: UTF-8 -*-

'''
@ Project: funny
@ File: book.py
@ Author: 韩成瑾
@ Date：2022/10/13 21:10

'''

import time

import requests
from lxml import etree
from lxml.html import clean

def get_qi_dian(url, flag, out_path='大医无疆'):
    """Fetch one Qidian chapter page, append its text to *out_path*, and
    return the link to the next chapter.

    Parameters
    ----------
    url : str
        Absolute URL of the chapter page to scrape.
    flag : int
        Chapter counter; ``0`` selects the DOM layout used by the very
        first chapter, any other value selects the layout of later ones.
    out_path : str, optional
        File the chapter text is appended to. Defaults to the book title
        that was previously hard-coded.

    Returns
    -------
    tuple[str, int]
        ``(href of the "next chapter" link, flag + 1)``.

    Raises
    ------
    IndexError
        When the page has no next-chapter element (end of the book) —
        the caller currently relies on this to terminate.
    requests.HTTPError
        When the page request fails (4xx/5xx).
    """
    res = requests.get(url)
    # Fail fast on HTTP errors instead of silently parsing an error page.
    res.raise_for_status()

    html = etree.HTML(res.content)
    title = html.xpath('//*[@id="j-readPage"]/head/title/text()')
    # Qidian renders the first chapter with a slightly different DOM tree,
    # hence the two hard-coded xpath variants.
    if flag == 0:
        content = html.xpath('/html/body/div[2]/div[3]/div[2]/div[1]/div[2]/div/div[2]/p')
    else:
        content = html.xpath("/html/body/div[2]/div[3]/div[2]/div[1]/div/div/div[2]/p")

    to_next = html.xpath('//*[@id="j_chapterNext"]')

    with open(out_path, 'a', encoding="utf-8") as f:
        # Guard: the title xpath may come back empty if the layout changes;
        # the original code would raise IndexError here.
        if title:
            f.write(title[0] + "\n")
        for paragraph in content:
            text = paragraph.text
            # A <p> that only wraps child elements has .text == None, which
            # previously crashed len(); skip empty paragraphs entirely.
            if not text:
                continue
            # Hard-wrap long paragraphs to 95 characters per output line.
            # Stepping by 95 also fixes the old off-by-one that emitted a
            # blank line when len(text) was an exact multiple of 95.
            for start in range(0, len(text), 95):
                f.write(text[start:start + 95] + "\n")

    flag += 1
    return to_next[0].get("href"), flag

# Resume from the checkpoint file: line 1 is the next chapter URL
# (scheme-less href prefixed with "https:"), line 2 is the chapter counter.
# Read it once and close it before the loop — the original kept this handle
# open forever while repeatedly reopening the same path for writing.
with open('断点', 'r', encoding="utf-8") as f:
    url_default = f.readline().strip("\n")
    flag = int(f.readline())

# Loop until get_qi_dian raises IndexError on the last chapter
# (the page has no "next chapter" link).
while True:
    child_url, flag = get_qi_dian(url_default, flag)
    url_default = "https:" + child_url
    print("完成")
    time.sleep(0.5)  # throttle requests to be polite to the server
    # Persist progress after every chapter so an interrupted run resumes
    # where it left off; the context manager guarantees the handle closes.
    with open("断点", "w+", encoding="utf-8") as ckpt:
        ckpt.write(url_default + "\n" + str(flag))



# exit()
# AutoNextPage = True  # 自动获取下一页
# NextPageWait = 0  # 自动获取间隔，单位秒
#
# AutoNextUnit = False  # 自动获取下一章
# NextUnitWait = 10  # 自动获取间隔，单位秒
#
# CharNumEveryLine = 20  # 一行显示多少字
#
# if not AutoNextPage or not AutoNextUnit:
#     inp = input()  # 如果并非全自动获取，需要在控制台敲击回车获取接下来的内容
# else:
#     inp = ''
#
# while (inp != 'exit'):  # 如果并非全自动获取，在控制台输入exit退出程序
#
#     temp = 0
#     Text = ''
#     # 使用request去get目标网址
#     res = requests.get(url + child_url)
#     res.encoding = "UTF8"
#     soup = BeautifulSoup(res.text, "html.parser")
#     for descendant in list(soup.body.descendants):
#         if descendant.name != None:
#             if not descendant.name == 'a':
#                 if descendant.name == 'p':
#                     if not descendant.text in Text:
#                         Text += descendant.text
#                 elif descendant.name == 'div':
#                     if not descendant.text in Text:
#                         Text += descendant.text
#             else:
#                 if descendant.text == '下一页':
#                     temp = 1
#                     child_url = descendant.attrs['href']
#                 elif descendant.text == '下一章':
#                     temp = 2
#                     child_url = descendant.attrs['href']
#
#     Text = Text.replace('&nbsp;', ' ').replace('<br />', '\n').replace('\n\n', '\n') \
#         .replace('\n\n', '\n').replace('\n\n', '\n').replace('    ', '')
#
#     i = 0
#     for c in Text:
#         print(c, end='')
#         i += 1
#         if c == '\n':
#             i = 0
#         if i > CharNumEveryLine:
#             print('\n', end='')
#             i = 0
#
#     if temp == 1 and AutoNextPage:
#         time.sleep(NextPageWait)
#     elif temp == 2 and AutoNextUnit:
#         time.sleep(NextUnitWait)
#     else:
#         print('\nNextUrlTo:', url + child_url)
#         inp = input()
