"""
必须参数：正文网址

"""

import BaseFunction as BF
import re
from bs4 import BeautifulSoup
import sys
import time
import MyClass as mc


# Search endpoint; the URL-encoded keyword is appended directly to this string.
SearchLink = 'https://www.boquge.com/search.htm?keyword='
# Site root, prepended to the relative hrefs scraped from result pages.
HomeLink = 'https://www.boquge.com'


def Getsoup(code):
    """Parse an HTTP response into a BeautifulSoup tree.

    code: a response object exposing a ``.text`` attribute
    (presumably what BF.GetHtmlCode returns — confirm against BaseFunction).
    Returns the parsed ``html.parser`` soup.
    """
    return BeautifulSoup(code.text, "html.parser")


# 获取正文
# Fetch the body text of one chapter.
def GetZW(aimLink):
    """Download the chapter page at *aimLink* and return its formatted text.

    Returns the chapter title followed by the body, with the site's
    boilerplate removed and full-width space pairs converted to HTML
    line breaks + indents.
    """
    # Throttle so we do not hammer the site.
    time.sleep(0.3)
    code = BF.GetHtmlCode(aimLink)

    soup = BeautifulSoup(code.text, "html.parser")
    # Strip embedded ad/JS containers before extracting text.
    adc = soup.find_all('div', class_='gad2')
    for ad in adc:
        ad.decompose()
    # Chapter title lives in the page's <h1>.
    ZJnum = soup.find('h1')
    # Chapter body container.
    ZW = soup.find('div', id='txtContent')

    txt = ZJnum.text + r'<br/>&nbsp;&nbsp;' + '  ' + \
        ZW.text.strip() + r'<br/>&nbsp;&nbsp;&nbsp;&nbsp;'
    # Remove the site's "you are reading ..." boilerplate notice.
    txt = re.sub('你正在阅读,如有错误,请及时联系我们纠正！', '', txt)
    # A pair of full-width spaces marks a paragraph start; turn it into a
    # break + indent.  BUG FIX: the original replacement emitted a
    # malformed entity ("&nbsp" with no trailing semicolon).
    txt = re.sub(r"\u3000\u3000", r'<br/>&nbsp;&nbsp;', txt)
    return txt


# 批量获取正文 参数：章节集合 返回 带正文的集合
# Fetch bodies for a batch of chapters. Takes a collection of section
# objects; returns the same collection with MainBody filled in.
def GetBody(zjc):
    total = len(zjc)
    for idx, section in enumerate(zjc, start=1):
        section.MainBody = GetZW(section.href)
        # Progress indicator: "chapter i/total".
        print("章节:" + str(idx) + '/' + str(total))
    return zjc


# 获取搜索页中的所有小说
def Getbook(soup):
    lic = soup.find_all('li')
    bc = []
    for li in lic:
        # 子节点数量必须为11 \n也占一个contents
        if len(li.contents) == 11:
            href = HomeLink + li.contents[3].contents[0]['href']
            # 直接存章节目录地址
            href = re.sub('xs', 'book', href)
            href = re.sub('/index.html', '/', href)
            if li.contents[9].next == '\n':
                continue
            time = li.contents[9].next.text
            b = mc.book(li.contents[1].text, li.contents[3].text,
                        li.contents[7].text, href, "笔趣阁", time)
            # b.PrintInfo()
            bc.append(b)
    return bc


# 搜索 返回的是book集合
def GetSearchResult(s):
    # dict1={}
    # 拼接搜索字符串
    l = SearchLink + s
    bc = []
    while True:
        code = BF.GetHtmlCode(l)
        soup = Getsoup(code)
        bc = bc + Getbook(soup)
        # 前往下一搜索页
        try:
            tag_pagelink = HomeLink + soup.find('a', text='»')['href']
            l = re.sub('encodedKeyword', s, tag_pagelink)
            # print(l)
        except:
            break
    return bc


# 爬取章节列表 返回章节集合
def ZJIndex(link):
    time.sleep(0.3)
    code = BF.GetHtmlCode(link)
    soup = Getsoup(code)
    ul = soup.find('ul', id="chapters-list").contents
    zjc = set()
    i = 1
    for l in ul:
        if l == '\n' or len(l.attrs) != 0:
            continue
        s = mc.Section(i, l.text, HomeLink + l.next['href'], 'none')
        # s.PrintInfo()
        zjc.add(s)
        i += 1
    return zjc


def GetCharpetNumber(soup):
    """Return the raw child-node list of the chapter-index <ul>."""
    chapter_list = soup.find('ul', id="chapters-list")
    return chapter_list.contents

