'''
Author: focus-on-jiaran-dundundun 331197689@qq.com
Date: 2024-10-20 19:43:00
LastEditors: focus-on-jiaran-dundundun 331197689@qq.com
LastEditTime: 2024-10-21 19:48:02
FilePath: /Desktop/漫画爬虫/search.py
Description: 用户自定义搜索内容，用于获取相关网址数据
'''
from bs4 import BeautifulSoup
from urllib.request import urlopen
import urllib.parse
import utils
from requests import Response
from curl_cffi import requests

verify = 'E:/certificate/itsacg.crt'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
    'Accept-Language': 'en-US,en;q=0.9',
    'Referer': 'https://www.itsacg.com/plugin.php?id=jameson_manhua'
}
openUrl = "https://www.itsacg.com/plugin.php?id=jameson_manhua&a=search&c=index&keyword=" 
def getPageList(inputStr):
    """Search the site for *inputStr* and return the last page-number link text.

    Fetches the search-results page, dumps the raw HTML to ``debug.html``
    for offline troubleshooting, then reads the pagination links.

    Args:
        inputStr: Raw (unencoded) search keyword typed by the user.

    Returns:
        The text of the second-to-last pagination ``<a>`` after sorting
        (presumably the highest page number, with the trailing link being
        a "next page" arrow — TODO confirm against the live markup), or
        ``None`` when there are no pagination links at all.
    """
    encoded_input = urllib.parse.quote(inputStr)
    response = requests.get(f'{openUrl}{encoded_input}', headers=headers, verify=verify)
    bs = BeautifulSoup(response.text, 'html.parser')
    # Dump the fetched page so a failed parse can be inspected offline.
    with open('debug.html', 'w', encoding='utf-8') as f:
        f.write(str(bs))

    page_list = bs.find('div', {'class': 'page'}).find_all('a')
    if not page_list:
        return None
    page_url_list = [page.get_text() for page in page_list]
    # Bug fix: the original called sorted() and discarded its return value,
    # so the list was never actually sorted. Sort in place instead.
    page_url_list.sort()
    # NOTE(review): this sorts strings lexicographically ("10" < "2");
    # confirm whether a numeric key was intended.
    return page_url_list[-2]

def inputSourceByPage(inputStr, pageNum):
    """Fetch one page of search results and collect comic ids and titles.

    Args:
        inputStr: Raw (unencoded) search keyword.
        pageNum: Results page number, appended as the ``&page=`` parameter.

    Returns:
        A ``(id_list, name_list)`` tuple of parallel lists, or ``None``
        when the page contains no result links.
    """
    keyword = urllib.parse.quote(inputStr)
    response = requests.get(f'{openUrl}{keyword}&page={pageNum}', verify=verify, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    anchors = soup.select('p.mt5.mb5 > a')
    if not anchors:
        return None
    ids = []
    titles = []
    for anchor in anchors:
        ids.append(utils.get_id(anchor.attrs['href']))
        title = anchor.get_text()
        titles.append(title)
        print(title)  # echo each hit so the user sees progress
    return ids, titles

def inputSearch(inputStr):
    """Fetch the first page of search results and collect comic ids and titles.

    Args:
        inputStr: Raw (unencoded) search keyword.

    Returns:
        A ``(id_list, name_list)`` tuple of parallel lists, or ``None``
        when the page contains no result links.
    """
    keyword = urllib.parse.quote(inputStr)
    response = requests.get(f'{openUrl}{keyword}', headers=headers, verify=verify)
    soup = BeautifulSoup(response.text, 'html.parser')
    anchors = soup.select('p.mt5.mb5 > a')
    if not anchors:
        return None
    ids = []
    titles = []
    for anchor in anchors:
        ids.append(utils.get_id(anchor.attrs['href']))
        title = anchor.get_text()
        titles.append(title)
        print(title)  # echo each hit so the user sees progress
    return ids, titles
    
