import re
import time

from bs4 import BeautifulSoup
from DrissionPage import SessionPage
from DrissionPage import errors
from py_mini_racer import MiniRacer

import utils
from SpiderInterface import SpiderInterFace
from utils.ACAuto import contains_all_keywords
from utils.LoginUtils import UnifiedLogger

class Renming(SpiderInterFace):
    """Spider that queries the People's Daily search API (search.people.cn)
    and scrapes each matching article page for text/image/video content.
    """

    def __init__(self, keywords: str, perPageNum: int = 200):
        """
        :param keywords: space-separated keywords; an article is kept only if
                         it contains all of them (see AcMatch).
        :param perPageNum: number of records requested per API page.
        """
        # Site name used in log messages.
        # NOTE(review): the search URL targets people.cn (人民日报) while this
        # label and tokenUrl reference gmw.cn (光明日报) — confirm which site
        # is actually intended before relying on the label.
        self.webName = "光明日报"
        # Total number of result pages; filled in by initialization().
        self.totalPages = 0
        # Records requested per page.
        self.perPageNum = perPageNum
        # Page cursor (1-based).
        self.currentPage = 1
        # Search keywords.
        self.keywords = keywords
        # Search API endpoint.
        self.url = "http://search.people.cn/search-platform/front/search"
        # JSONP endpoint returning an access token; {time} is filled with the
        # current timestamp in milliseconds.
        self.tokenUrl = "https://zhonghua.gmw.cn/service/getToken.do?callback=jQuery172011993289611962399_1737540926402&_={time}"
        # POST body template; key/page/limit are overwritten per request.
        self.postData = {"key":"习近平 福建 环保","page":1,"limit":1,"hasTitle":'true',"hasContent":'true',"isFuzzy":'true',"type":0,"sortType":2,"startTime":0,"endTime":0}
        # CSS class names of known article-body containers, tried in order.
        self.classStrList = ["rm_txt_con cf","text_con text_con01"]
        # Request retry count and timeout (seconds).
        self.retryNum = 3
        self.timeout = 2

        # Tool initialisation.
        self.page = SessionPage()
        self.logging = UnifiedLogger.get_logger()
        self.utils = utils.Utils()
        self.ctx = MiniRacer()

        # Log-category tags dispatched on by logRequestResult.
        self.getDetailsJson = "getDetailsJson"
        self.getPageHtml = "getPageHtml"

    def getToken(self):
        """Fetch an access token from the JSONP endpoint and return it.

        The response body looks like
        ``jQuery...({"token":"..."})``; the token value is extracted by regex.

        :return: the token string.
        :raises Exception: if the response contains no token field.
        """
        self.page.get(self.tokenUrl.format(time=int(time.time() * 1000)))
        token_str = self.page.html
        match = re.search(r'"token":"([^"]+)"', token_str)
        if match is None:
            # Fail loudly instead of the original AttributeError on None.
            raise Exception("获取token失败，响应中未找到token字段")
        return match.group(1)

    def getJsonData(self, keyWords, page, perPageNum):
        """POST a search request and return the parsed JSON response.

        :param keyWords: search keywords to put in the request body.
        :param page: 1-based page number to fetch.
        :param perPageNum: records per page.
        :return: the response parsed as JSON (``self.page.json``).
        :raises Exception: via logRequestResult when the request fails.
        """
        self.postData["key"] = keyWords
        self.postData["page"] = page
        self.postData["limit"] = perPageNum
        postResult = self.page.post(self.url, json=self.postData, retry=self.retryNum, timeout=self.timeout)
        self.logRequestResult(postResult, self.getDetailsJson, self.webName, currentPage=page)
        return self.page.json

    def logRequestResult(self, result, webType, webName, url=None, currentPage=0):
        """Log the outcome of a request; abort the crawl on failure.

        :param result: truthy when the request succeeded.
        :param webType: one of self.getDetailsJson / self.getPageHtml.
        :param webName: site name for the log line.
        :param url: article URL (only used for the getPageHtml category).
        :param currentPage: page number for the log line.
        :raises Exception: when ``result`` is falsy, to stop the program.
        """
        if webType == self.getDetailsJson:
            if result:
                self.logging.info("关键词：{keywords},{webName}第{currentPage}页数据,获取数据成功".format(keywords=self.keywords, webName = webName,currentPage=currentPage))
            else:
                self.logging.error("关键词：{keywords},{webName}第{currentPage}页数据,获取数据失败".format(keywords=self.keywords, webName = webName, currentPage=currentPage))
                raise Exception("获取数据异常，为保证程序安全，程序终止")
        elif webType == self.getPageHtml:
            if result:
                self.logging.info("关键词：{keywords},{webName}:{url},获取数据成功".format(keywords=self.keywords, webName = webName, url=url))
            else:
                self.logging.error("关键词：{keywords},{webName}第{currentPage}页数据:{url},获取数据失败".format(keywords=self.keywords, webName = webName, currentPage=currentPage, url=url))
                raise Exception("获取数据异常，为保证程序安全，程序终止")

    def initialization(self):
        """Query one record to learn the total hit count and set totalPages."""
        dataDetailsJson = self.getJsonData(self.keywords, 1, 1)
        totalNums = dataDetailsJson["data"]['total']
        # Ceiling division: the original floor division dropped the final
        # partial page (and produced 0 pages when totalNums < perPageNum).
        self.totalPages = (totalNums + self.perPageNum - 1) // self.perPageNum
        self.currentPage = 1
        self.logging.info("当前关键词：{keyWords}查询记录共：{totalNums},每页数据为：{perPageNum},共计{pages}页".format(keyWords = self.keywords, totalNums = totalNums, perPageNum = self.perPageNum, pages = self.totalPages))
        return

    def AcMatch(self):
        """Extract the article body from the currently loaded page.

        Tries each known container class in order; if the body text contains
        all keywords, also attempts to pull the first image/video link.

        :return: (content, imgUrl, videoUrl); (None, None, None) when the
                 keywords do not all match or no container is found.
        """
        content = None
        imgUrl = None
        videoUrl = None
        for classStr in self.classStrList:
            try:
                container = self.page.ele(".%s" % classStr)
                content = container.text
                if contains_all_keywords(content, self.keywords):
                    # Media links are optional; missing elements are fine.
                    try:
                        imgUrl = container.ele("tag:img").link
                    except errors.ElementNotFoundError:
                        pass
                    try:
                        videoUrl = container.ele("tag:video").link
                    except errors.ElementNotFoundError:
                        pass
                    return content, imgUrl, videoUrl
                return None, None, None
            except errors.ElementNotFoundError:
                continue
        # No known container matched: the original fell off the loop and
        # implicitly returned None, crashing the tuple-unpacking caller.
        return None, None, None

    def getDataByPage(self, needPage):
        """Fetch one result page and scrape every listed article.

        :param needPage: 1-based page number to fetch.
        :return: list of dicts with title/url/content/imgUrl/videoUrl for
                 each article whose body matched all keywords.
        """
        resultList = []
        jsonData = self.getJsonData(keyWords=self.keywords, page=needPage, perPageNum=self.perPageNum)
        newsList = jsonData["data"]["records"]
        for i in newsList:
            # Titles arrive with HTML highlight markup; strip it to plain text.
            soup = BeautifulSoup(i['title'], 'html.parser')
            title = soup.get_text()
            url = i['url']
            getResult = self.page.get(url, retry=3, timeout=5)
            self.logRequestResult(getResult, self.getPageHtml, self.webName, i['url'], needPage)
            # Throttle between article fetches.
            time.sleep(4)
            content, imgUrl, videoUrl = self.AcMatch()
            if content is None:
                continue
            result = {
                "title": title,
                "url": url,
                "content": content,
                "imgUrl": imgUrl,
                "videoUrl": videoUrl
            }
            resultList.append(result)
        return resultList

if __name__ == "__main__":
    # Renming requires the search keywords at construction time; the original
    # `Renming()` call raised TypeError (missing `keywords`).
    renming = Renming("科技 上海")
    # NOTE(review): `main` is not defined in this file (presumably inherited
    # from SpiderInterFace) — confirm its signature accepts the keywords.
    renming.main("科技 上海")
