from datetime import datetime

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from bs4 import BeautifulSoup
import re

from newsCaptureBase import NewsCaptureBase

from constants import END_KEYWORDS


class GetArticleFromCsw(NewsCaptureBase):
    """Scrape title, author, publish date and body text from a 99csw.com article page.

    The page is fetched once with Selenium in ``__init__``; all getters then
    work off the cached BeautifulSoup tree in ``self.soup`` (``None`` when the
    fetch was skipped or failed).
    """

    def __init__(self, news_url):
        super().__init__()

        self.url = news_url
        # Fetch eagerly so every getter can just consult self.soup.
        self.soup = self.fetch_page()

    def fetch_page(self):
        """Load the article page in the browser and parse it.

        Returns:
            BeautifulSoup tree of the rendered page, or ``None`` when the URL
            does not belong to 99csw.com.
        """
        if '99csw.com' not in self.url:
            self.logger.log('debug', "网址不属于藏书网，可能无法成功获取内容")
            return None

        self.browser.get(self.url)
        try:
            # Wait up to 30 seconds for the article container to be present.
            WebDriverWait(self.browser, 30).until(
                expected_conditions.presence_of_element_located((By.ID, "content"))
            )
            # Element is loaded; grab the fully rendered HTML.
            response_source = self.browser.page_source
        finally:
            # Always release the browser, even if the wait timed out.
            self.browser.quit()

        self.logger.log('debug', f'response_source: {response_source}')
        # Keep a copy of the raw HTML on disk for offline debugging.
        with open('response.txt', 'w', encoding='utf-8') as f:
            f.write(response_source)
            self.logger.log('debug', f'successfully fetch page and write it to response.txt')

        return BeautifulSoup(response_source, 'lxml')

    def get_title(self):
        """Return the article title text, or ``None`` when not found."""
        if not self.soup:
            return None
        tags_to_check = ['dd']
        for tag in tags_to_check:
            header_div = self.soup.find(tag, {'id': 'subject'})
            self.logger.log('debug', f'header_div: {header_div}')
            # Guard: find() returns None when the tag is absent; the original
            # code would raise AttributeError on header_div.find().
            if not header_div:
                continue
            title_div = header_div.find('div')
            if title_div:
                # Strip surrounding whitespace from the text content.
                return title_div.get_text(strip=True)
        return None

    def get_author(self):
        """Return the author name, or ``None`` when not found."""
        if not self.soup:
            return None

        # select_one returns None instead of raising IndexError when the
        # element is missing (select(...)[0] would crash on an empty result).
        author_name_tag = self.soup.select_one('span[data-testid="author-name"]')
        if author_name_tag:
            return author_name_tag.get_text(strip=True)

        return None

    def get_publish_date(self):
        """Return the publish date formatted as ``YYYY年MM月DD日``, or ``None``."""
        if not self.soup:
            return None

        # select_one avoids the IndexError that select(...)[0] raises when
        # the element is missing.
        date_tag = self.soup.select_one('span[data-testid="updatetime"]')
        self.logger.log('debug', f'date_tag: {date_tag}')

        if date_tag:
            publish_date = date_tag.get_text(strip=True)
            # Site format is "YYYY-MM-DD HH:MM"; re-render as a Chinese date.
            publish_date_obj = datetime.strptime(publish_date, "%Y-%m-%d %H:%M")
            return publish_date_obj.strftime("%Y年%m月%d日")

        return None

    def get_content(self):
        """Return the article body built from ``span.bjh-p`` paragraphs, or ``None``."""
        if not self.soup:
            return None

        content_tag_parent = self.soup.find('div', {'data-testid': 'article'})
        self.logger.log('debug', f'content_tag_parent: {content_tag_parent}')

        # Check for None BEFORE calling len(): the original logged
        # len(content_tag_parent) first, which raises TypeError on None and
        # made this guard unreachable.
        if not content_tag_parent:
            return None
        self.logger.log('debug', f'len(content_tag_parent): {len(content_tag_parent)}')

        # Collect every paragraph node's text.
        content_tag = content_tag_parent.find_all('span', class_='bjh-p')
        content_parts = [paragraph.get_text(strip=True) for paragraph in content_tag]

        # Join paragraphs with newlines.
        content_text = "\n".join(content_parts)

        # Truncate at the first occurrence of any terminating keyword.
        for keyword in END_KEYWORDS:
            index = content_text.find(keyword)
            if index != -1:
                content_text = content_text[:index]

        # Drop any trailing whitespace/newlines left by truncation.
        return content_text.rstrip()

    def get_content_v1(self):
        """Return the article body from all text nodes (v1 strategy), or ``None``."""
        if not self.soup:
            return None

        content_tag_parent = self.soup.find('div', {'data-testid': 'article'})
        self.logger.log('debug', f'content_tag_parent: {content_tag_parent}')

        # As in get_content: None-check must precede len(), otherwise
        # len(None) raises TypeError before the guard runs.
        if not content_tag_parent:
            return None
        self.logger.log('debug', f'len(content_tag_parent): {len(content_tag_parent)}')

        # Gather every text node under the article container.
        content_parts = content_tag_parent.find_all(string=True)

        # Drop nodes that are empty or purely numeric (page artifacts).
        content_parts_no_number = [item for item in content_parts if not re.match(r'^\d*$', item)]
        self.logger.log('debug', f'content_parts_no_number: {content_parts_no_number}')

        content_text = "\n".join(content_parts_no_number)

        # Truncate at the first occurrence of any terminating keyword.
        for keyword in END_KEYWORDS:
            index = content_text.find(keyword)
            if index != -1:
                content_text = content_text[:index]

        # Drop any trailing whitespace/newlines left by truncation.
        return content_text.rstrip()



if __name__ == "__main__":
    url = 'https://www.99csw.com/article/4526.htm'
    article = GetArticleFromCsw(url)

    # NOTE: the original calls were print('info', f'...'), a leftover from a
    # logger.log('info', ...) signature that prefixed every line with "info ".
    print(f'标题:  {article.get_title()}')
    print(f'作者:  {article.get_author()}')
    print(f'发布日期: {article.get_publish_date()}')
    print(f'文章内容: {article.get_content_v1()}')
