from bs4 import BeautifulSoup
from typing import Optional
from WeirLinksCrawler import getLinks
from WeirdCommon import getContext,getContextByEncode
from crawler import config
import opencc
from file_dir import file_retrieve
import time
from db.NovelInfoDBOperator import NovelInfo, DBOperator
from requests.exceptions import ConnectionError
from file_dir import file_operator

# Root output directory for crawled novels (one subdirectory per novel title),
# taken from the project-wide configuration.
output_path_prefix = config.generalConfig.regular_output_path
class TianlaiNovelCrawler(object):
    """Chapter-by-chapter web-novel crawler.

    Despite the name, two sites are supported through separate analyzer
    pairs: Tianlai (``tianlainovel_*``) and Boquku (``boquku*``).  The
    workflow is: run a ``*_meta_analyzer`` to fill ``novel_summary`` and
    ``novel_section_list`` from the index page, then ``novel_crawler`` to
    download each chapter into a numbered ``.txt`` file.
    """

    def __init__(self, home, prefix, replacement, title='自定义'):
        """
        :param home: URL of the novel's index (chapter-list) page.
        :param prefix: URL prefix prepended to each relative chapter link.
        :param replacement: boilerplate string stripped from chapter text
            (pass '' to strip nothing).
        :param title: novel title; used as the output directory name.
        """
        self.novel_home = home
        self.novel_title = title
        self.prefix = prefix
        self.novel_summary = ''
        # List of (section_title, section_link) tuples filled by the analyzers.
        self.novel_section_list = []
        self.replacement = replacement
        # Backward-compatible alias for the old misspelled attribute name.
        self.repalcement = replacement

    # https://www.tzkczaa.com/ 天籁小说网 (Tianlai)
    def tianlainovel_meta_analyzer(self):
        """Parse the Tianlai index page into novel_summary / novel_section_list."""
        content = getContext(self.novel_home)
        self.novel_summary = content.find(id='intro').text
        for item in content.find_all('dd'):
            link = item.find('a')
            # Skip layout rows that carry no chapter link (previously crashed
            # with AttributeError on None).
            if link is None:
                continue
            sec_link = link.get('href', '').replace('//', '')
            self.novel_section_list.append((item.text, sec_link))
        print(self.novel_section_list)

    def tianlaisection_content_analyzer(self, url):
        """Fetch one Tianlai chapter page and return its cleaned text."""
        soup = getContext(url)
        content = soup.find('div', id='content')
        # Chapter text is split across several <div id='ccc'> fragments.
        fragments = content.find_all('div', id='ccc')

        # Strip site boilerplate from each fragment and re-join with newlines.
        text_with_newlines = []
        for frag in fragments:
            text = frag.get_text().replace(self.replacement, "")
            text_with_newlines.append(text + '\n')
        return ''.join(text_with_newlines)

    # https://www.boquku.com/ 笔趣阁 (Boquku)
    def boqukunovel_meta_analyzer(self):
        """Parse the Boquku index page into novel_summary / novel_section_list."""
        content = getContextByEncode(self.novel_home, 'utf-8')
        # Bug fix: the original passed a set {'class','jieshaokaishi'} (comma
        # instead of colon); bs4 expects an attrs dict as used on the next line.
        self.novel_summary = content.find('div', {'class': 'jieshaokaishi'}).text
        section_list = content.find('div', {'class': 'zhangjiekaishi'})
        for item in section_list.find_all('li'):
            # Volume-header rows are tagged class="volumn" and hold no chapter.
            if 'volumn' in (item.get('class') or []):
                continue
            link = item.find('a')
            if link is None:
                continue
            sec_link = link.get('href', '').replace('//', '')
            self.novel_section_list.append((item.text, sec_link))
        print(self.novel_section_list)

    def boqukusection_content_analyzer(self, url):
        """Fetch one Boquku chapter page and return its text with paragraph breaks."""
        soup = getContextByEncode(url, 'utf-8')
        content = soup.find('div', id='booktext')
        # The site indents paragraphs with full-width spaces; turn them into
        # newlines for readable plain text.
        return content.text.replace('　　', '\n')

    def file_writer(self, file_name, file_content, writing_pattern='w'):
        """Write file_content to file_name as UTF-8.

        :param writing_pattern: 'w' to overwrite, 'a' to append.
        """
        with open(file_name, writing_pattern, encoding='utf-8') as f:
            f.write(file_content)

    # 需要按照不同网站调整content_analyzer，目前有：趣笔阁、天籁小说网等
    def novel_crawler(self, resume_from=895, content_analyzer=None):
        """Download chapters from novel_section_list into numbered text files.

        :param resume_from: 1-based index of the first chapter to download.
            Defaults to 895 to preserve the historical hard-coded resume
            point (`step > 894`); pass 1 for a full crawl.
        :param content_analyzer: callable(url) -> str returning a chapter's
            text; defaults to the Boquku analyzer (switch per target site).
        """
        if content_analyzer is None:
            content_analyzer = self.boqukusection_content_analyzer
        file_operator.mkdir(output_path_prefix + self.novel_title)
        self.file_writer(output_path_prefix + self.novel_title + r'\0000Summary.txt',
                         self.novel_summary)
        for step, (sec_title, sec_link) in enumerate(self.novel_section_list, start=1):
            if step < resume_from:
                continue
            sec_content = content_analyzer(self.prefix + sec_link)
            print('输出：' + sec_title)
            # Strip characters that are illegal or awkward in Windows filenames.
            safe_title = sec_title.replace('\t', ' ').replace('?', '')
            self.file_writer(output_path_prefix + self.novel_title + '\\' +
                             str(step).zfill(4) + safe_title + '.txt', sec_content)



if __name__ == '__main__':
    # Chapter links on this site are relative, so the index URL doubles as
    # the per-chapter prefix; no boilerplate string needs stripping.
    novel_url = r'http://www.pksge.la/jinhuadesishiliuyizhongzou/'
    crawler = TianlaiNovelCrawler(novel_url, novel_url, "", '进化的四十六亿重奏')
    crawler.boqukunovel_meta_analyzer()
    crawler.novel_crawler()
    print('Finish!')