import requests,re,os
from lxml import etree
# from urllib.parse import quote
# from search import SearchBook



class Book:
    """Download a novel from bqgka.com chapter by chapter into a local text file."""

    def __init__(self, keyword):
        """Create/truncate the output file named after *keyword*.

        NOTE(review): the output directory D:\\A\\book is hard-coded and
        Windows-specific — confirm it exists before running.
        """
        # Double the backslashes so the path is literally D:\A\book\<keyword>.txt.
        # The original 'D:\A\\book' only worked because the invalid escape '\A'
        # was passed through unchanged (a DeprecationWarning, and a SyntaxError
        # in future Python versions).
        self.file_path = f'D:\\A\\book\\{keyword}.txt'
        # Remove any previous download so each run starts from a clean file.
        if os.path.exists(self.file_path):
            os.remove(self.file_path)

        self.file = open(self.file_path, 'a', encoding='utf-8')

    def save_file(self, one):
        """Append one line of chapter text to the output file."""
        self.file.write(one)

    def close_file(self):
        """Close the output file; call once after all chapters are saved."""
        self.file.close()

    def get_html_info(self, base_url):
        """Fetch the book's table-of-contents page and store an iterator of
        chapter hrefs on ``self.iterator`` for ``get_one_chapter`` to consume.

        base_url: site-relative path of the book's index page.
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
        }

        url = 'https://www.bqgka.com' + str(base_url)

        res = requests.get(url, headers=headers).content.decode('utf-8')
        html = etree.HTML(res)
        chapter_links = html.xpath('//div[@class="listmain"]//dd/a/@href')
        # Drop the "expand all chapters" pseudo-link if present. The original
        # unconditional remove() raised ValueError whenever the page layout
        # omitted it.
        if 'javascript:dd_show()' in chapter_links:
            chapter_links.remove('javascript:dd_show()')
        self.iterator = iter(chapter_links)

    def get_one_chapter(self):
        """Download every chapter from ``self.iterator`` and append its text to
        the output file, printing a progress line per chapter.

        Requires ``get_html_info()`` to have been called first.
        """
        # Loop-invariant request headers — build once, not per chapter.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        }
        for chapter_num, href in enumerate(self.iterator, start=1):
            url = f'https://www.bqgka.com{href}'
            content = requests.get(url, headers=headers).content.decode('utf-8', 'ignore')
            one_text = etree.HTML(content)
            con = one_text.xpath('//div[@class="Readarea ReadAjax_content"]/text()')
            # The second-to-last text node is site boilerplate; guard so a
            # short or empty result no longer raises IndexError.
            if len(con) >= 2:
                del con[-2]
            for one in con:
                # Strip ideographic (full-width, U+3000) spaces used for
                # paragraph indentation on the site.
                self.save_file(one.replace('\u3000', '') + '\n')
            print(f'第{chapter_num}章下载完成')







# if __name__ == '__main__':
#     b=Book()
#     keyword = quote(input('请输入要搜索的名：'))
#     b.get_base_url(keyword)
    # b.get_html_info()
    # b.get_one_chapter()
    # b.close_file()

# if __name__ == '__main__':
#     keyword = quote(input('请输入要搜索的名：'))
#     s = SearchBook(keyword)
#     # s.get_book_info()
#     s.select_book()
