import sys, re, requests
import BaseFunction as BF, MyClass as mc
from bs4 import BeautifulSoup

# Site root; prefixed onto the relative hrefs scraped from result pages.
HomeLink = "https://www.soxs.cc"
# Search endpoint; receives the POST payload built by searchParms().
searchAction = '''https://www.soxs.cc/search.html'''


# 合成字典
def searchParms(name):
    """Build the POST payload for the site-wide search endpoint."""
    return dict(searchtype="all", searchkey=str(name))


def AddBooks(li, books):
    """Parse one search-result <li> row and append an mc.book to *books*.

    Expected row layout: contents[0] = category (wrapped in one leading and
    one trailing character, which are stripped), contents[1] = title with an
    <a href> child, contents[3] = author, contents[4] = update time.
    """
    cells = li.contents
    category = cells[0].text[1:-1]  # drop the surrounding bracket characters
    title = cells[1].text
    writer = cells[3].text
    url = HomeLink + cells[1].contents[0]['href']  # make the relative link absolute
    updated = cells[4].text
    books.append(mc.book(category, title, writer, url, '搜书网', updated))


def GetSearchPageResultBylink(link, books):
    """Walk paginated search results starting at *link*.

    Appends every parsed row to *books* (mutated in place) and follows the
    '下一页' (next page) anchor until it disappears. Returns *books*.
    """
    while True:
        page = BF.GetBSoup(link)
        # The third <ul> holds the result table; every other child is a row.
        rows = page.findAll('ul')[2].contents[1::2]
        del rows[0]  # first row is the header, not a result
        for row in rows:
            AddBooks(row, books)
        more = page.find('a', text='下一页')
        if more is None:
            return books
        link = HomeLink + more['href']


def GetSearchPageResultByName(name):
    """Search the site by book name and return a list of mc.book results.

    Parses the first result page here, then delegates any remaining pages
    to GetSearchPageResultBylink (which mutates the same list).
    """
    results = []
    resp = requests.post(searchAction, params=searchParms(name), headers=BF.headers)
    # NOTE(review): siblings use BF.GetBSoup; confirm BF.Getsoup also exists.
    page = BF.Getsoup(resp)
    # The third <ul> holds the result table; every other child is a row.
    rows = page.findAll('ul')[2].contents[1::2]
    del rows[0]  # first row is the header, not a result
    for row in rows:
        AddBooks(row, results)
    more = page.find('a', text='下一页')
    if more is not None:
        GetSearchPageResultBylink(HomeLink + more['href'], results)
    return results


def GetChapterList(link):
    """Fetch the chapter index page at *link* and return a list of mc.Section.

    Each Section gets a 1-based index, the chapter title, an absolute URL,
    and the placeholder body 'temp' (filled in later by GetBody).
    """
    page = BF.GetBSoup(link)
    listing = page.findAll('div', class_='novel_list')[0]
    entries = []
    for dl in listing.contents[1::2]:
        # skip the <dt> heading nodes; keep every other child from index 3 on
        entries += dl.contents[3::2]
    chapters = []
    for idx, entry in enumerate(entries, start=1):
        url = HomeLink + entry.contents[0]['href']
        chapters.append(mc.Section(idx, entry.text, url, 'temp'))
    return chapters


def GetZW(link, name):
    """Download the chapter body at *link* and return it as HTML-ish text.

    The chapter title *name* is prefixed, paragraphs are joined with
    '<br><br>', and raw newlines are normalized to the same separator.
    """
    page = BF.GetBSoup(link)
    body = page.find('div', class_='content')
    # Magic offsets: skip leading nodes and trailing boilerplate, take every
    # third child — tied to the site's markup, verify if the layout changes.
    paragraphs = body.contents[2:-12:3]
    text = '<br><br>'.join(paragraphs)
    text = text.replace('\n', '<br><br>').replace('\r', '<br><br>')
    return name + '<br><br>' + text

# Fetch the chapter body, keeping HTML-style <br> tags (no title prefix).
def GetZWHaveHtml(link):
    """Download the chapter body at *link* and return it with <br><br> breaks."""
    page = BF.GetBSoup(link)
    body = page.find('div', class_='content')
    # Magic offsets: skip leading nodes and trailing boilerplate, take every
    # third child — tied to the site's markup, verify if the layout changes.
    paragraphs = body.contents[2:-12:3]
    text = '<br><br>'.join(paragraphs)
    return '<br><br>' + text.replace('\n', '<br><br>').replace('\r', '<br><br>')


def GetBody(zjc):
    """Fill MainBody of every Section in *zjc* by downloading its chapter.

    Prints 'i/total' progress per chapter; mutates and returns *zjc*.
    """
    total = len(zjc)
    for idx, zj in enumerate(zjc, start=1):
        zj.MainBody = GetZW(zj.href, zj.name)
        print(str(idx) + '/' + str(total))
    return zjc
