"""
文章采集器
"""
import json
import logging

from . import WebWorker

from core.config import *
from modules.response_format import *

# Load every checker prompt from disk once, at import time.
# Keys are the error-type names; values are the raw prompt text.
prompt = {}

_ERROR_TYPES = ('词语误用', '句子歧义')

for _error_type in _ERROR_TYPES:
    with open(f'./prompt/{_error_type}.txt', 'r', encoding='utf-8') as fp:
        prompt[_error_type] = fp.read()

# Prompt used for the final pass that consolidates all detected errors.
with open('./prompt/错误总结.txt', 'r', encoding='utf-8') as fp:
    summary_prompt = fp.read()


class ArticleProcessor(WebWorker):
    """
    Worker for article pages on news.jxnhu.edu.cn.

    Downloads the article body, runs one LLM check per error type,
    then asks a second model to summarise the detected mistakes.
    """

    # Raw string: '\.' and '\/' are invalid escape sequences in a plain
    # string literal (SyntaxWarning on Python 3.12+). The raw form keeps
    # the pattern value byte-identical to the original.
    regex_url = r'.*jxnhu\.edu\.cn\/info\/.*'

    # Articles republished from these external outlets are skipped —
    # checking third-party copy would only add noise.
    # (The original list contained "中国教育在线" twice; deduplicated.)
    _BANNED_SOURCES = ("中国教育在线", "读嘉", "潮新闻客户端", "嘉兴日报")

    def process(self):
        """
        Generator required by the WebWorker interface.

        Yields exactly one ``(mistakes, new_links)`` pair:
        * ``mistakes`` — list with a single summary record when errors
          were found, otherwise an empty list;
        * ``new_links`` — always empty for article pages.
        """
        logging.info(f"页面 {self.url} 属于文章页面准备处理......")

        self.process_set(self.url, status='获取文章内容中......')

        article, showtitle = self.dump_article()

        # BUG FIX: the None-check must come BEFORE the banned-source scan.
        # The original ran `ban_name in showtitle` first, which raised
        # TypeError whenever dump_article() returned (None, None).
        if article is None or article.strip() == '':
            yield [], []  # 第一个为错误的集合，第二个为新的链接
            return

        # Skip articles republished from external media outlets.
        if any(name in showtitle for name in self._BANNED_SOURCES):
            yield [], []  # 第一个为错误的集合，第二个为新的链接
            return

        logging.info("获取到文章内容：" + article)

        checking_contents = []

        # One LLM pass per error type; collect every non-empty finding.
        for error_type, check_prompt in prompt.items():
            self.process_set(self.url, status=f'大模型正在进行{error_type}检测......', note=f"《{showtitle}》")

            message = self._ask_model(check_prompt, article, MODEL_CHECK)

            # Some providers attach chain-of-thought as `reasoning_content`;
            # it is optional, so fall back to None.
            llm_reason = getattr(message, 'reasoning_content', None)
            llm_content = message.content

            logging.debug("语言模型思考：" + str(llm_reason))
            logging.debug("语言模型回应：" + str(llm_content))

            # The sentinel token means the model found nothing to report.
            if '<|无明显错误|>' in llm_content or llm_content.strip() == '':
                continue

            checking_contents.append(llm_content)

        self.process_set(self.url, status='大模型正在进行总结错误......', note=f"《{showtitle}》")

        # Join all per-type findings into one document for the summariser.
        checking_content = '\n\n'.join(checking_contents).strip()

        if checking_content == '':
            yield [], []  # 第一个为错误的集合，第二个为新的链接
            return

        summary_message = self._ask_model(summary_prompt, checking_content, MODEL_SUMMARY)

        _, mistake = parse_content_to_json(summary_message.content)

        # Covers both None and an empty container.
        if not mistake:
            yield [], []  # 第一个为错误的集合，第二个为新的链接
            return

        logging.debug(json.dumps(mistake, indent=4))

        reason = {
            'title': showtitle,
            'article': article,
            'mistake': mistake
        }

        yield [self.summary_process(
            valid=False,
            reason=reason,
            url=self.url,
            note=f"《{showtitle}》"
        )], []

        return

    def _ask_model(self, system_prompt, user_content, model):
        """
        Run one deterministic (temperature=0) chat completion and return
        the first choice's message object.
        """
        result = openai_client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_content}
            ],
            max_tokens=4096,
            temperature=0
        )
        return result.choices[0].message

    @staticmethod
    def _join_paragraphs(content):
        """Concatenate the stripped text of every <p> in *content*, one per line."""
        return ''.join(p.text.strip() + '\n' for p in content.find_all('p'))

    def dump_article(self):
        """
        Extract ``(article_text, title)`` from the current page.

        Handles the two known page layouts; returns ``(None, None)`` when
        the HTTP request fails or the expected elements are missing.
        """
        response, soup, _ = self.get_normal()

        if response.status_code != 200:
            return None, None

        title_elem = soup.find('div', class_='news-title')

        if title_elem is not None:
            showtitle = title_elem.find('h1', class_='showtitle')
            if showtitle is None or showtitle.text.strip() == '':
                showtitle = '__留空__'
            else:
                # NOTE(review): deliberately not stripped, matching the
                # original behavior — callers see the raw title text.
                showtitle = showtitle.text

            content = soup.find('div', class_='news-content')

        else:  # alternate layout, e.g. https://news.jxnhu.edu.cn/info/
            # BUG FIX: the original dereferenced .text without a None
            # check and crashed with AttributeError on unexpected pages.
            title_div = soup.find('div', class_='chapter-title')
            if title_div is None:
                return None, None

            showtitle = title_div.text.strip()
            content = soup.find('div', class_='v_news_content')

        if content is None:
            return None, None

        return self._join_paragraphs(content), showtitle
