from sqlalchemy import select, desc

from app.config import db
from app.model import News  # import the News model
import re
import requests
from app.repository.news import NewsRepository
from bs4 import BeautifulSoup

class NewsService:
    """Service layer for news: listing, single lookup, and crawling the
    MOE (moe.gov.cn) search portal to ingest new articles."""

    @staticmethod
    async def get_all_news():
        """Return all news rows as (id, title, docdate, summary) tuples,
        ordered newest-first by publication date."""
        query = select(News.id, News.title, News.docdate, News.summary).order_by(
            desc(News.docdate))
        result = await db.execute(query)
        return result.all()

    @staticmethod
    async def get_news_by_id(news_id: int):
        """Fetch one news record by primary key; result semantics
        (e.g. None when absent) are delegated to the repository."""
        return await NewsRepository.find_by_id(news_id)

    @staticmethod
    async def get_main_context(url: str) -> str:
        """Download *url* and return the inner HTML of the article body
        (the <div class="moe-detail-box"> container) as a UTF-8 string.

        Raises:
            ValueError: if the page is not reachable (non-200 status) or
                the expected content container is absent.

        NOTE(review): ``requests`` is blocking; calling it inside an async
        method stalls the event loop. Consider httpx/aiohttp or
        ``loop.run_in_executor`` — left as-is to avoid a new dependency.
        """
        headers = {
            "User-Agent": "Apifox/1.0.0 (https://apifox.com)",
            "Accept": "*/*",
            "Host": "www.moe.gov.cn",
            "Connection": "keep-alive",
        }
        # timeout added: without it a stalled server hangs the coroutine forever
        response = requests.get(url, headers=headers, timeout=30)
        response.encoding = "utf-8"  # pages are served as UTF-8
        if response.status_code != 200:
            print("无法访问网页")
            # BUG FIX: the original fell through to the return with
            # `main_content` unbound, raising UnboundLocalError.
            raise ValueError(f"failed to fetch {url}: HTTP {response.status_code}")
        soup = BeautifulSoup(response.text, "html.parser")
        main_content = soup.find("div", class_="moe-detail-box")  # main article container
        if main_content is None:
            print("未找到主要内容容器")
            # BUG FIX: the original fell through and crashed with
            # AttributeError on None; fail explicitly instead.
            raise ValueError(f"main content container not found in {url}")
        return main_content.encode_contents().decode("utf-8")

    @staticmethod
    async def update_news():
        """Crawl the MOE search portal for news, fetch each article's body,
        and persist the items via NewsRepository.

        Returns:
            tuple[int, int]: (rows actually inserted, items found by the
            search), or the string "未找到 initPubProperty" when the search
            page's bootstrap script could not be parsed (this inconsistent
            error return is kept for backward compatibility with callers).
        """
        # Step 1: load the search page to extract the anti-bot tokens that
        # initPubProperty(...) embeds in an inline script.
        url = "http://so.moe.gov.cn/s?qt=%E9%AB%98%E4%B8%AD&siteCode=bm05000001&tab=xw&sitePath=true&toolsStatus=1"
        headers = {
            "User-Agent": "Apifox/1.0.0 (https://apifox.com)",
            "Accept": "*/*",
            "Host": "so.moe.gov.cn",
            "Connection": "keep-alive",
        }
        response = requests.get(url, headers=headers, timeout=30)
        match = re.search(r"initPubProperty\((.*?)\);", response.text, re.DOTALL)
        if match:
            # The call's arguments are one-per-line; strip quotes/commas and
            # blank lines, then pick tokens by position in the argument list.
            init_property = match.group(1)
            init_property = init_property.replace(",", "").replace("'", "").split("\n")
            init_property = [x.strip() for x in init_property]
            init_property = [x for x in init_property if x != ""]
            if len(init_property) <= 12:
                # Guard: positional parse below needs indices 5, 6 and 12;
                # treat a shorter argument list as an unparseable page.
                print("未找到 initPubProperty")
                return "未找到 initPubProperty"
            timeStamp = init_property[5]
            wordToken = init_property[6]
            suid = init_property[12]
            print("timeStamp: ", timeStamp, "wordToken: ", wordToken, "suid: ", suid)
        else:
            print("未找到 initPubProperty")
            return "未找到 initPubProperty"
        # Step 2: call the search API with the extracted tokens.
        url = "https://api.so-gov.cn/s"
        payload = f"siteCode=bm05000001&tab=xw&timestamp={timeStamp}&wordToken={wordToken}&page=1&pageSize=20&qt=%E9%AB%98%E4%B8%AD&timeOption=1&sort=dateDesc&keyPlace=0&fileType=&toolsStatus=1&days=50"
        headers = {
            "authority": "api.so-gov.cn",
            "accept": "application/json, text/javascript, */*; q=0.01",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
            "origin": "http://so.moe.gov.cn",
            "referer": "http://so.moe.gov.cn/",
            "sec-ch-ua": '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "cross-site",
            "suid": suid,
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "api.so-gov.cn",
            "Connection": "keep-alive",
        }
        response = requests.post(url, headers=headers, data=payload, timeout=30)
        new_list = []
        for i in response.json()["data"]["search"]["searchs"]:
            title = i.get("title", "")
            view_url = i.get("viewUrl", "")
            # Strip the search-hit <em> markers and curly quotes from the summary.
            summary = (
                i.get("summary", "")
                .replace("<em>", "")
                .replace("</em>", "")
                .replace("“", "")
                .replace("”", "")
            )
            docDate = i.get("docDate", "")
            new_list.append(
                {"title": title, "url": view_url, "summary": summary, "docdate": docDate}
            )
        print("获取到的新闻数量：", len(new_list))
        # Step 3: fetch each article body and persist; one bad article must
        # not abort the whole crawl (original crashed on the first failure).
        count = 0
        for item in new_list:
            try:
                item["main_content"] = await NewsService.get_main_context(item["url"])
            except Exception as exc:
                print("跳过文章:", item["url"], exc)
                continue
            count += await NewsRepository.add_news(item)
        return count, len(new_list)



