import requests
import js2py
from bs4 import BeautifulSoup
from Title import Title
from Chapter import Chapter

pre_url = "http://www.qiman58.com"
get_surplus_chapter_url = "http://www.qiman58.com/bookchapter/"

def add_img(img_url_suf):
    """Fetch a chapter page and extract its image list.

    The site hides the image URLs inside an ``eval(...)`` <script> tag;
    js2py evaluates the packed JS and the array contents are sliced out
    of the resulting string.

    :param img_url_suf: chapter page path, e.g. '/16041/1437377.html'
    :return: comma-joined image URL string with quotes stripped, or None
             if no eval script was found on the page
    """
    img_url = pre_url + img_url_suf
    # Bug fix: the request previously used a hard-coded chapter URL,
    # ignoring the img_url built from the parameter above.
    resp = requests.get(img_url)
    soup = BeautifulSoup(resp.text, "html.parser")
    img_list = None
    for script in soup.find_all('script'):
        if 'eval' in str(script):
            # Strip the leading 'eval(' prefix (6 chars) and evaluate the
            # remaining packed JS payload.
            ss = js2py.eval_js(script.text[6:])
            # The evaluated string embeds an array literal: take what is
            # between '[' and ']' and drop the quote characters.
            # NOTE(review): kept as a raw comma-joined string as in the
            # original; callers may want to .split(',') it.
            img_list = ss[ss.index('[') + 1:ss.index(']')].replace('"', '')
    return img_list


def add_chapter(title_no):
    """Collect every chapter of a title.

    First scrapes the chapter links embedded in the title page
    (div#chapter-list1), then POSTs to the site's AJAX endpoint for the
    remaining chapters that are not rendered in the HTML.

    :param title_no: title path like '/21429/' (leading and trailing '/')
    :return: list of Chapter objects
             (bug fix: the list was previously built and then discarded)
    """
    print("书名" + title_no)
    title_url = pre_url + title_no
    chapter_href_list = []
    resp = requests.get(title_url)
    soup = BeautifulSoup(resp.text, "html.parser")
    for chapter_label in soup.find_all('div', id='chapter-list1'):
        for chapter_a in chapter_label.select('a'):
            # Chapter link + chapter display name from the static HTML.
            chapter_href_list.append(Chapter(chapter_a['href'], chapter_a.text))
    # Extract the numeric title id from '/<id>/' for the AJAX request.
    title_no_new = title_no[1:]
    title_no_new = title_no_new[:title_no_new.find('/')]
    request_data = {'id': title_no_new, 'id2': 1}
    surplus_resp = requests.post(get_surplus_chapter_url, request_data)
    # Endpoint returns a JSON list of {'chapterid': ..., 'chaptername': ...}.
    for surplus_chapter in surplus_resp.json():
        chapter_obj = Chapter(title_no + surplus_chapter['chapterid'] + '.html',
                              surplus_chapter['chaptername'])
        chapter_href_list.append(chapter_obj)
    return chapter_href_list

def addTitle(titleObj):
    """Dump the title object's attribute dict to stdout (debug helper)."""
    print(vars(titleObj))

def getTitle(titleList, page_url):
    """Scrape a category listing page, recursing through pagination.

    Appends a Title object for every book found on the page to
    *titleList* (mutated in place), then follows the ' > ' (next page)
    link, if present, recursively.

    :param titleList: output list of Title objects, mutated in place
    :param page_url: listing page path, e.g. '/sort/1-9.html'
    """
    next_page_url = pre_url + page_url
    resp = requests.get(next_page_url)
    soup = BeautifulSoup(resp.text, "html.parser")
    for items in soup.find_all('div', class_='bookList_3'):
        for title in items.select('a'):
            book_href = title["href"]
            for img in title.select('img'):
                if 'title' in img.attrs:
                    # Cover image src + human-readable title.
                    titleObj = Title(img['src'], img['title'])
                    titleObj.set_category(1)
                    # NOTE(review): this second call overwrites the category
                    # just set above with the book href — looks like it should
                    # be a different setter (e.g. set_href); confirm against
                    # the Title class before changing.
                    titleObj.set_category(book_href)
                    titleList.append(titleObj)
    pageList = []
    textList = []
    for page_pagination in soup.find_all('div', class_='page-pagination mt20'):
        for a in page_pagination.select('a'):
            if 'href' in a.attrs:
                pageList.append(a["href"])
                textList.append(a.text)
    # Bug fix: guard against an empty pagination bar — the original indexed
    # textList[-1] unconditionally and raised IndexError on the last layout.
    if textList and textList[-1] == " > ":
        getTitle(titleList, pageList[-1])


if __name__ == '__main__':
    # Manual test drivers — enable one entry point at a time:
    #   titleList = []; getTitle(titleList, "/sort/1-9.html")   # crawl listing
    #   add_chapter('/21429/')                                  # chapter list
    add_img('/16041/1437377.html')
