from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import sys

# Raised far above the default (~1000) so the recursive same-site crawl in
# getLinks (currently commented out) can follow deep link chains without
# hitting RecursionError.
sys.setrecursionlimit(20000)

# URLs already visited — used to dedupe pages during the recursive crawl.
pages = set()


def getLinks(articleUrl):
    """Fetch an article page from php.cppedu.net and print its heading.

    Prints the text of the page's first <h1>; if there is none, falls back
    to the first <h3>; if neither exists, prints a notice instead.

    Args:
        articleUrl: Page path relative to the site root, e.g.
            "language.namespaces.fallback.html".
    """
    global pages
    # Use the response as a context manager so the HTTP connection is
    # closed deterministically instead of being leaked.
    with urlopen("http://php.cppedu.net/" + articleUrl) as html:
        bsObj = BeautifulSoup(html, "html.parser")

    # Prefer <h1>, fall back to <h3>. This replaces the original nested
    # try/except AttributeError blocks, which used exceptions for plain
    # control flow.
    heading = bsObj.find('h1') or bsObj.find('h3')
    if heading is not None:
        print(heading.get_text())
    else:
        print('无此属性')  # page has neither an <h1> nor an <h3>

    # NOTE(review): the recursive crawl over same-site *.html links was left
    # commented out by the author; `pages` and the raised recursion limit
    # exist to support it.
    # links = bsObj.find('body').findAll(
    #     "a", href=re.compile(r"^(?!(http))+[A-Za-z0-9\._+]+(.html)$"))
    # for link in links:
    #     if 'href' in link.attrs and link.attrs['href'] not in pages:
    #         newPage = link.attrs['href']
    #         print("----------------\n" + newPage)
    #         pages.add(newPage)
    #         getLinks(newPage)


if __name__ == "__main__":
    # Entry point: fetch one known article. The guard keeps an `import` of
    # this module from triggering a network request as a side effect.
    getLinks("language.namespaces.fallback.html")