import time
import traceback

from lxml import etree
import requests


def remove_trailing_slash(s):
    """Drop a single trailing '/' or '#' from *s*; otherwise return it unchanged."""
    return s[:-1] if s.endswith(('/', '#')) else s


class Spider:
    """Breadth-first crawler that collects every same-domain URL reachable
    from a starting page.
    """

    def __init__(self):
        # Only URLs beginning with this prefix are followed.
        self.domain = "https://www.calsp.cn"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.62"
        }

    def get_link(self, home_url=None):
        """Crawl outward from *home_url* (default: ``self.domain``) and
        return all discovered same-domain URLs in discovery order.

        Anchor (``#...``) and root-relative (``/...``) hrefs are resolved
        against ``self.domain``; other hrefs that do not start with the
        domain are skipped.  Fetch errors are logged and the crawl
        continues with the next queued URL.
        """
        if home_url is None:
            home_url = self.domain
        res_list = [home_url]
        seen = set(res_list)  # O(1) dedup instead of O(n) scans of res_list
        idx = 0

        # One session for the whole crawl: reuses pooled connections instead
        # of constructing a new Session (and TCP handshake) per request.
        session = requests.Session()
        session.trust_env = False

        while idx < len(res_list):
            get_url = res_list[idx]
            idx += 1
            try:
                # A timeout keeps one unresponsive host from stalling the
                # crawl forever.
                response = session.get(get_url, headers=self.headers, timeout=10)
                # Raise on HTTP error status so bad pages are logged below.
                response.raise_for_status()

                element = etree.HTML(response.text)
                for a in element.xpath("//a"):
                    hrefs = a.xpath("./@href")
                    if not hrefs:
                        continue
                    href = hrefs[0].strip()
                    if href == "":
                        continue
                    href = remove_trailing_slash(href)
                    # Resolve fragment-only and root-relative links.
                    if href.startswith("#") or href.startswith("/"):
                        href = self.domain + href
                    # Stay inside the target domain.
                    if not href.startswith(self.domain):
                        continue
                    if href in seen:
                        continue
                    seen.add(href)
                    res_list.append(href)
            except Exception:
                print("ERROR:" + get_url)
                traceback.print_exc()
            # Be polite to the server: pause between requests.
            time.sleep(2)
        return res_list


if __name__ == '__main__':
    # Crawl the configured site and dump every discovered URL, one per line.
    for link in Spider().get_link():
        print(link)
