'''
Author: focus-on-jiaran-dundundun 331197689@qq.com
Date: 2024-10-21 21:19:58
LastEditors: focus-on-jiaran-dundundun 331197689@qq.com
LastEditTime: 2024-10-21 23:37:40
FilePath: /Desktop/漫画爬虫/tmp.py
Description: 对搜索进行优化，适用于大部分场景下的搜索
'''
from bs4 import BeautifulSoup
import requests
class Content:
    """Container for one scraped article: the search topic plus the page data."""

    def __init__(self, topic, url, title, body):
        self.topic = topic
        self.url = url
        self.title = title
        self.body = body

    def print(self):
        """
        Flexible printing helper that controls how a result is displayed.
        """
        # Emit the fields in a fixed, human-readable order.
        lines = (
            f"New article found for topic: {self.topic}",
            f"TITLE: {self.title}",
            f"BODY:\n{self.body}",
            f"URL: {self.url}",
        )
        for line in lines:
            print(line)

class Website:
    """Describes how to search one website.

    Attributes:
        name: Human-readable site name.
        url: Landing page used to sanity-check the site is reachable.
        searchUrl: URL prefix that the search keyword is appended to.
        selector: CSS selector matching result links on the search page.
        titleTag: CSS selector for the landing page's title element.
        bodyTag: CSS selector for the landing page's body element.
        resultList: Tags matched by the most recent search (empty until set).
    """

    def __init__(self, name, url, searchUrl, selector, titleTag, bodyTag):
        self.name = name
        self.url = url
        self.searchUrl = searchUrl
        self.selector = selector
        self.titleTag = titleTag
        self.bodyTag = bodyTag
        # Initialize to an empty list so readers of resultList never hit
        # AttributeError when no search has stored results for this site.
        self.resultList = []

    def set_resultList(self, resultList):
        """Store the list of result tags found by a search of this site."""
        self.resultList = resultList

class Crawler:
    """Fetches pages and runs topic searches against configured Website objects."""

    def getPage(self, url):
        """Download *url* and return it parsed as a BeautifulSoup tree.

        Aborts the program with exit status 1 when the request fails,
        matching the script's original fail-fast behavior.
        """
        try:
            # Catch only network/HTTP errors — the original bare `except:`
            # also swallowed KeyboardInterrupt and parser bugs.
            html = requests.get(url)
        except requests.exceptions.RequestException:
            print('未找到该网站')
            raise SystemExit(1)
        # Parse outside the try so BeautifulSoup errors are not misreported
        # as "site not found".
        return BeautifulSoup(html.text, 'html.parser')

    def safeGet(self, bs, selector):
        """Return the text of the first element matching *selector*, or None."""
        childObj = bs.select(selector)
        # select() returns a (possibly empty) list; truthiness covers both
        # the empty-list and (defensively) the None case.
        if childObj:
            return childObj[0].get_text()
        return None

    def search(self, topic, site):
        """Search *site* for *topic* and store every result tag on the site.

        First verifies the landing page yields a title or body (sanity check
        that the site is the expected one), then fetches the search page and
        records matching tags via site.set_resultList().
        """
        bs = self.getPage(site.url)
        title = self.safeGet(bs, site.titleTag)
        body = self.safeGet(bs, site.bodyTag)
        if title is None and body is None:
            print('未找到该网站的标题信息')
            raise SystemExit(1)
        bs = self.getPage(f'{site.searchUrl}{topic}')
        searchResults = bs.select(site.selector)
        # BUGFIX: select() never returns None — it returns a list, so the
        # original `searchResults == None` branch was unreachable. An empty
        # list is the real "no results" signal.
        if not searchResults:
            print('未找到搜索结果')
        else:
            site.set_resultList(searchResults)

crawler = Crawler()

# Each row: [name, landing URL, search URL prefix, result-link selector,
#            title selector, body selector] — passed positionally to Website.
siteData = [
    ['漫画网站', 'https://www.itsacg.com/plugin.php?id=jameson_manhua',
    'https://www.itsacg.com/plugin.php?id=jameson_manhua&a=search&c=index&keyword=','p.mt5.mb5 > a', 
    'h1','body'],
]
sites = [Website(*row) for row in siteData]

topics = ['我的青梅']

for topic in topics:
    print("GETTING INFO ABOUT: " + topic)
    for targetSite in sites:
        crawler.search(topic, targetSite)

for site in sites:
    # BUGFIX: getattr guards against sites whose search never stored results
    # (original code raised AttributeError on a missing resultList).
    for tag in getattr(site, 'resultList', []):
        # BUGFIX: .get avoids KeyError for anchors without an href attribute.
        href = tag.attrs.get('href')
        if href is not None:
            print(href)
