import requests,os
from bs4 import BeautifulSoup
from configparser import ConfigParser


file = '' # Output-file handle for the book text (opened in GetHtmlData, used by DoGetOnePage).
config = '' # Active ConfigObject with the settings of the selected config-file node (set in GetHtmlData).
threads = 0 # Worker-thread counter; currently unused (matches the unused "thread" config key).

class ConfigObject:
    """Dynamic attribute bag for configuration values.

    readConfig() sets one attribute per key of the active config-file
    section; all values are strings as delivered by ConfigParser.
    """

    def __repr__(self):
        # Show the stored options instead of the default "<object at 0x...>",
        # so printing the config during startup is actually informative.
        return f"{type(self).__name__}({vars(self)})"
    

def InitConfig():
    """Create a default config.txt template in the working directory.

    Returns:
        0  -- config.txt already exists; nothing is written.
        -1 -- a fresh template was just written; the caller should stop
              and let the user edit it before running again.
    """
    name = 'config.txt'
    if os.path.exists(name):
        return 0
    data = '''
[MAIN_NODE]
#使用下面的那个节点
main=CONFIG


########################################################
#################这个节点的配置数据#####################
########################################################
[CONFIG]
#网站的主页，不要在最后加  /
url=https://www.XXX.com

#书的主页 不加url内容
index=/XXX/index.html

#保存书的名字
name=XXX.txt

#开始位置 书从哪个位置（大于等于1）开始获取，0全部获取
start_num=0

#是否添加新的标题，num为标题的序号
new_title=0
new_title_num=1

#跳过标签的前多少个不获取。
skip_num=0

#多少个线程工作，未使用
thread=1

#==========================================# NO1
#获取书的目录，使用什么方式。 
#on书的目录比较难获取，就可以使用下面的方法 0关闭使用
list_top_on=0
#使用class/id定位
list_top_flag=class
# class/id 名字
list_top_name=class-name
#如果有相同的名字，那就找到指定的那个，不使用填-1
list_top_index=-1
#==========================================#

#目录所在的标签
list=ul li



#-------------------------------------------# NO2
#文章显示的内容获取
#找在那个地方文字，同NO1
#通过class/id找
test_flag=id
test_name=content
test_index=-1
#-------------------------------------------#



########################################################
#################这个节点的配置数据#####################
########################################################
[OTHER_CONFIG]
#其他的配置
name=.....


    '''
    # "with" guarantees the template is flushed and closed on every path;
    # the original left the handle open, risking an unflushed/leaked file.
    with open(name, "w+", encoding='utf-8') as template:
        template.write(data)
    return -1

def readConfig():
    """Load config.txt and return the active section as a ConfigObject.

    The [MAIN_NODE] section's "main" key names the node to use; every
    key of that node becomes an attribute on the returned object
    (values stay strings, as delivered by ConfigParser).
    """
    parser = ConfigParser()
    parser.read('config.txt', encoding='utf-8')
    # MAIN_NODE.main selects which configuration node is active.
    nodeName = parser['MAIN_NODE']['main']
    obj = ConfigObject()
    for key, value in parser[nodeName].items():
        setattr(obj, key, value)
    # NOTE: the leftover debug print(obj) was removed; callers only need
    # the returned object.
    return obj
    
# Value of every single numeral character accepted by StrToNumOne:
# ASCII digits, Chinese digits, and the multiplier characters 十/百/千/万.
_NUMERAL_VALUE = {
    "零": 0, "0": 0,
    "一": 1, "1": 1,
    "二": 2, "2": 2,
    "三": 3, "3": 3,
    "四": 4, "4": 4,
    "五": 5, "5": 5,
    "六": 6, "6": 6,
    "七": 7, "7": 7,
    "八": 8, "8": 8,
    "九": 9, "9": 9,
    "十": 10,
    "百": 100,
    "千": 1000,
    "万": 10000,
}


def StrToNumOne(num):
    """Return the numeric value of one numeral character.

    Accepts Chinese numerals ("零"-"九", "十", "百", "千", "万") and the
    ASCII digits "0"-"9". Returns -1 for anything unrecognized, which the
    parser in StrToNum uses as an end-of-number marker.
    """
    # A single dict lookup replaces the original 14-branch if-chain.
    return _NUMERAL_VALUE.get(num, -1)

def StrToNum(numStr):
    """Parse the chapter number out of a title like "第十二章 ..." or "第12章 ...".

    Index 0 (the "第" marker) is always skipped. Handles plain ASCII digits
    as well as Chinese numerals with 十/百/千/万 multipliers. Returns the
    parsed value, or 0 when no numeral follows the marker.
    """

    def val(i):
        # Bounds-checked numeral lookup; -1 doubles as "past end of string".
        # The original indexed numStr[2] / numStr[i+1] unguarded and raised
        # IndexError on titles that end right after the numeral ("第五", "第十").
        return StrToNumOne(numStr[i]) if i < len(numStr) else -1

    num = 0
    i = 1
    # ASCII-digit form, e.g. "第12章": accumulate base-10 digits.
    if len(numStr) > 1 and '0' <= numStr[1] <= '9':
        while True:
            ret = val(i)
            if ret == -1:
                return num
            num = num * 10 + ret
            i += 1

    # Leading-十 form, e.g. "第十二章" (10-19): optional units digit follows.
    if val(1) == 10:
        ones = val(2)
        return 10 + ones if ones != -1 else 10

    # General Chinese form, e.g. "第一百二十三章": digit*multiplier pairs.
    while True:
        ret = val(i)
        if ret == -1:
            return num
        if ret == 0:
            # 零 is only a placeholder ("一百零三"), skip it.
            i += 1
            continue

        ret2 = val(i + 1)
        if ret2 == -1:
            # Trailing units digit with no multiplier ("二十一" -> +1).
            return num + ret
        num += ret * ret2
        i += 2

def DoGetOnePage(baseUrl, title):
    """Fetch one chapter page and append its title and text to the output file.

    baseUrl -- chapter path, joined onto config.url.
    title   -- chapter title from the index page; re-numbered when the
               config.new_title option is enabled.

    Uses the module globals `config` (settings) and `file` (output handle).
    Best-effort: extraction failures write an empty body instead of aborting.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
    }
    req = requests.get(url=config.url + baseUrl, headers=headers)
    req.encoding = "utf-8"
    soup = BeautifulSoup(req.text, features="html.parser")

    # Locate the content container(s) by class or id, per NO2 in config.txt.
    # Initialized to [] so an unrecognized test_flag no longer raises
    # NameError further down.
    matches = []
    if config.test_flag == 'class':
        matches = soup.find_all("div", class_=config.test_name)
    elif config.test_flag == 'id':
        matches = soup.find_all("div", id=config.test_name)

    try:
        if int(config.test_index) != -1:
            # A specific match was requested by index.
            saveText = matches[int(config.test_index)].get_text()
        else:
            # test_index=-1 (the shipped default) means "no specific index".
            # BUGFIX: the original called get_text() on the ResultSet itself,
            # which always raised and silently produced an empty chapter;
            # join the text of every match instead.
            saveText = "\n".join(m.get_text() for m in matches)
    except Exception:
        # Best-effort: a missing or oddly formatted page must not stop the crawl.
        saveText = ""
        print("err......")

    if int(config.new_title) == 1:
        # Replace the scraped title with a running "第N章" numbering.
        title = "第" + str(config.new_title_num) + "章 " + title
        config.new_title_num = int(config.new_title_num) + 1
    file.write(title + "\n")
    file.write(saveText + "\n")
    print(title, " end...")


def GetHtmlData():
    """Entry point: read the config, fetch the book index, download all chapters.

    Writes every chapter into the file named by config.name, then waits for
    a key press before closing so the console output can be inspected.
    """
    global config, file
    if InitConfig() == -1:
        # A template config.txt was just created; the user must edit it first.
        print("init config ....")
        return
    config = readConfig()
    file = open(config.name, "w+", encoding='utf-8')

    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
    headers = {
            'User-Agent': user_agent
            }
    req = requests.get(url=config.url + config.index, headers=headers)
    req.encoding = "utf-8"
    soup = BeautifulSoup(req.text, features="html.parser")

    # Optionally narrow the document to the element holding the index (NO1).
    if int(config.list_top_on) == 1:
        if config.list_top_flag == 'class':
            soup = soup.find_all("div", class_=config.list_top_name)
        if config.list_top_flag == 'id':
            soup = soup.find_all("div", id=config.list_top_name)
        # BUGFIX: the original tested len(config.list_top_index) != -1 (always
        # true) and indexed soup[len(...)] -- the *length* of the string, i.e.
        # soup[2] for "-1". Use the configured value; select() below needs a
        # single Tag, so -1 falls back to the first match.
        idx = int(config.list_top_index)
        soup = soup[idx] if idx != -1 else soup[0]

    lists = soup.select(config.list)
    lens = len(lists)
    i = int(config.skip_num)  # skip the first skip_num index entries

    foundStart = True
    start_num = int(config.start_num)
    # Bound checked at the top: the original only checked after a successful
    # iteration, so a large skip_num or the `continue` paths could IndexError.
    while i < lens:
        a = lists[i].find("a")
        try:
            path = a.attrs.get("href")
        except Exception:
            # Entry without a link (decorative <li>, etc.) -- skip it.
            print("err path no... index:" + str(i))
            i += 1
            continue

        if start_num != 0:
            # Skip forward until the chapter numbered start_num is reached.
            page = StrToNum(a.text.strip())
            if page < start_num and foundStart:
                i += 1
                continue
            foundStart = False

        try:
            DoGetOnePage(path, a.text.strip())
        except Exception:
            # str(path) also covers path=None, which used to crash this print.
            print("err path :" + str(path) + " index:" + str(i))

        i += 1

    print("all end.................")
    input("")  # keep the console window open until the user presses Enter
    file.close()
    print("all end.................")
    

# Run the scraper only when executed as a script (not on import).
if __name__ == "__main__":
    GetHtmlData()

            
    

















    
