import traceback
from urllib.parse import urljoin

import pandas as pd
from bs4 import BeautifulSoup

from about_module import resolveHistory, resolveIdea, resolveView
from common import setup_session, save_file, get_response
from guidance_module import resolveWhereGo, resolveDuty, resolvePlan
from team_module import resolveManagementTeam, resolveTeachTeam

# Nav-link titles that must not be crawled (landing/index pages, sign-up forms).
# A frozenset gives O(1) membership tests in crawl_page and rules out duplicates.
BLACKLIST = frozenset({
    '首页',        # Home
    '关于我们',    # About us
    '师资团队',    # Faculty team
    '学术体系',    # Academic system
    '升学指导',    # College counseling
    '招生报名',    # Admissions
    '校园生活',    # Campus life
    '工作机会',    # Careers
    '网上报名',    # Online registration
})

# Dispatch table: Chinese nav-link title -> handler that scrapes that section.
# Entries below are commented out to disable their handlers; only "毕业生去向"
# (graduate destinations) is currently crawled. Uncomment a line to re-enable.
function_map = {
    # "办学历史": resolveHistory,        # school history
    # "办学理念": resolveIdea,           # educational philosophy
    # "校园景色": resolveView,           # campus scenery
    # "管理团队": resolveManagementTeam, # management team
    # "教学团队": resolveTeachTeam,      # teaching team
    "毕业生去向": resolveWhereGo,        # graduate destinations (active)
    # "部门职责": resolveDuty,           # department duties
    # "升学规划": resolvePlan,           # college planning
}


def extract_links(content):
    """Extract navigation links from the homepage HTML.

    Looks for the first ``<ul class="clearfix">`` element (the site's nav
    menu) and collects every anchor inside it.

    Args:
        content: Raw HTML text of the homepage.

    Returns:
        A list of ``{"title": <link text>, "href": <raw href>}`` dicts;
        empty if the nav <ul> is not found.
    """
    soup = BeautifulSoup(content, 'html.parser')
    linkInfos = []
    nav_ul = soup.find('ul', class_='clearfix')
    if nav_ul:
        # href=True skips anchors with no href attribute, which would
        # otherwise raise KeyError on a['href'] below.
        for a in nav_ul.find_all('a', href=True):
            print(f"链接文字: {a.text}, URL: {a['href']}")
            linkInfos.append({"title": a.text, "href": a['href']})
    return linkInfos


def crawl_page(session, url, linkInfos):
    """Visit every non-blacklisted link and dispatch it to its handler.

    Args:
        session: Requests-style session used for all fetches.
        url: Base URL used to resolve relative hrefs.
        linkInfos: Iterable of ``{"title": ..., "href": ...}`` dicts.

    Links whose title is in BLACKLIST are skipped; fetched pages are routed
    through function_map by title, and unmapped titles are reported.
    """
    for info in linkInfos:
        title = info['title']
        target_url = urljoin(url, info['href'])
        if title in BLACKLIST:
            continue
        response = get_response(session, target_url)
        if not response:
            continue
        handler = function_map.get(title)
        if handler is None:
            print(f"{title} 未找到对应的处理方法!!!!!")
            continue
        handler({"href": target_url, "title": title, "content": response.text, "session": session})


def main():
    """Crawl the school homepage: extract nav links, save them, visit each.

    Does nothing further if the homepage request fails (falsy response).
    """
    homepage_url = 'https://www.ghcis.com/'
    session = setup_session()
    homepage = get_response(session, homepage_url)
    if not homepage:
        return
    links = extract_links(homepage.text)
    save_file(links)
    crawl_page(session, homepage_url, links)


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Top-level boundary: report the failure, but also print the full
        # traceback — str(e) alone discards where the error occurred.
        print(f"Error in main execution: {str(e)}")
        traceback.print_exc()
