from applications.configs.func import chinese_to_num


# Biquge (笔趣阁) novel scraper
class BQG:
    """Scraper for the Biquge (笔趣阁) novel site.

    All methods take a ``base_object`` — a parent helper that drives the
    browser and returns parsed soup objects (``send_message_chick``,
    ``parse_select``, ``wait_css_show``).
    """

    browser = ""  # browser handle (assigned externally)

    base_url = "https://www.biqugewxw.com"
    # book-search endpoint
    search_books_url = "https://www.biqugewxw.com/search/"

    def search_books(self, base_object, novel_name):
        """
        Open the search page, submit *novel_name*, and parse the result list.

        :param base_object:  parent helper object driving the browser
        :param novel_name:   novel title to search for
        :return: list of dicts with keys ``name``/``address``/``author``/``chapter``
        """
        # open the page and type the keyword into the search box
        base_object.send_message_chick(self.search_books_url, "ID", "searchkey", novel_name)
        contents = base_object.parse_select(".category-div")
        result_contents = []
        for content in contents:
            content_div = content.find("div")
            a = content_div.select(".flex>a")[0]
            author = content_div.find("span").text
            result_contents.append(
                {"name": a.text, "address": self.base_url + a.attrs["href"], "author": author, "chapter": 1})

        return result_contents

    # fetch chapter listing
    def get_characters(self, base_object, novel_url, start_chapter, end_chapter):
        """
        Collect chapter entries whose number lies in [start_chapter, end_chapter].

        :param base_object:     parent helper object
        :param novel_url:       novel index-page URL
        :param start_chapter:   first chapter number (inclusive)
        :param end_chapter:     last chapter number (inclusive)
        :return: list of dicts with keys ``name``/``address``/``chapter``
        """
        soup = base_object.wait_css_show(novel_url, "CLASS_NAME", "info-chapters", "-1")
        chapters = soup.select(".info-chapters")[-1].select("a")
        chapter_list = []
        for c in chapters:
            info_list = str(c.text).split(" ")
            if len(info_list) <= 1:
                # not a "第X章 title" style link — skip
                continue
            # link text looks like "第十二章 标题": strip the 第/章 markers and
            # convert the Chinese numeral to an int
            chapter = int(chinese_to_num(info_list[0].strip("第").strip("章")))
            if start_chapter <= chapter <= end_chapter:
                name = info_list[1]
                address = self.base_url + c.attrs["href"]
                chapter_list.append({"name": name, "address": address, "chapter": chapter})
            elif chapter > end_chapter:
                # chapters are listed in ascending order, so stop early
                break

        return chapter_list

    # fetch chapter content
    def get_content(self, base_object, chapter_url):
        """
        Download every page of one chapter and join the text with newlines.

        :param base_object:  parent helper object
        :param chapter_url:  URL of the chapter's first page
        :return: full chapter text as a single string
        """
        content_list = []
        for page in range(10):  # assumes a chapter spans at most 10 pages
            p = page + 1
            if p == 1:
                new_chapter_url = chapter_url
            else:
                # BUGFIX: the original used chapter_url.rstrip(".html"), but
                # str.rstrip strips any trailing characters from the SET
                # {'.', 'h', 't', 'm', 'l'}, which mangles URLs such as
                # ".../123l.html".  Remove the exact ".html" suffix instead.
                if chapter_url.endswith(".html"):
                    base = chapter_url[:-len(".html")]
                else:
                    base = chapter_url
                new_chapter_url = "{}_{}.html".format(base, p)
            soup = base_object.wait_css_show(new_chapter_url, "ID", "article", "-1")
            # collect this page's text
            content = soup.select("#article")[0].text
            content_list.append(content)
            # stop once the "next" link no longer reads "下一页" (next page)
            next_url = soup.find(id="next_url").text.strip()
            if next_url != '下一页':
                break

        return "\n".join(content_list)

    def get_last_new_chapter(self, base_object, novel_url):
        """
        Return the number of the newest chapter.

        :param base_object:  parent helper object
        :param novel_url:    novel index-page URL
        :return: int chapter number, or 0 if no link could be parsed
        """
        # fetch the chapter listing; the first .info-chapters holds the latest links
        soup = base_object.wait_css_show(novel_url, "CLASS_NAME", "info-chapters", "-1")
        a_list = soup.select(".info-chapters")[0].select("a")
        chapter = 0
        for a in a_list:
            try:
                chapter = int(chinese_to_num(str(a.text).split(" ")[0].strip("第").strip("章")))
                if chapter:
                    break
            except Exception:
                # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate; unparsable link text is simply skipped
                continue
        return chapter