#!/usr/bin/python3
# -*- coding: UTF-8 -*-

import ssl 
# WARNING: this globally disables HTTPS certificate verification for every
# urllib request in the process. Acceptable only for a throwaway crawler on
# a trusted network — it exposes all HTTPS traffic to man-in-the-middle
# attacks. Never ship this in production code.
ssl._create_default_https_context = ssl._create_unverified_context

from urllib import request, error
from bs4 import BeautifulSoup

class Spider:
    """Simple recursive web crawler that collects same-domain URLs.

    Configure the instance attributes (``url``, ``open_url``, ``count``,
    ``timeout``) and call :meth:`startSpider`.  Discovered links are
    accumulated, de-duplicated, in ``self.url_lst``.
    """

    def __init__(self):
        # Start URL for the crawl.
        self.url = ""
        # Substring a link must contain to be kept (typically the domain).
        # BUGFIX: the original assigned this attribute twice.
        self.open_url = ""
        # Maximum crawl depth (number of recursive passes).
        self.count = 2
        # Per-request timeout in seconds.
        self.timeout = 1
        # Accumulated list of discovered URLs (kept duplicate-free).
        self.url_lst = []

    # 爬虫启动方法
    def startSpider(self):
        """Run the crawl starting from ``self.url`` and print the result."""
        # First pass seeds the URL list from the start page.
        urlList = self.contentSpider()

        listData = self.countSpider(urlList)
        print(listData)

    # 爬虫爬取层级控制(递归)
    def countSpider(self, url_list_data, count = 2):
        """Recursively crawl every URL in ``url_list_data``.

        ``count`` tracks the current depth; recursion stops once it exceeds
        ``self.count``.  Returns the URL list collected so far.
        """
        if count > self.count:
            # Depth limit reached: de-duplicate and stop recursing.
            self.url_lst = self.listRemoval(self.url_lst)
            return url_list_data

        # Fetch each URL at this depth; contentSpider appends any links it
        # finds to self.url_lst as a side effect.
        for item in url_list_data:
            self.url = item
            self.contentSpider()

        # BUGFIX: the original discarded the recursive call's result, so the
        # top-level call always returned None and startSpider printed None.
        return self.countSpider(self.url_lst, count + 1)

    # 爬虫爬取html并且获取当前页面所有的url
    def contentSpider(self):
        """Fetch ``self.url`` and return the list of URLs found on the page.

        Returns an empty list when the request fails (HTTP error, timeout,
        DNS failure, ...) instead of crashing.
        """
        try:
            response = request.urlopen(self.url, timeout = self.timeout)
        except error.HTTPError as e:
            print(e.reason, e.code, e.headers, sep='\n')
            # BUGFIX: the original fell through to ``response.read()`` with
            # ``response`` unbound (NameError). A failed fetch yields no links.
            return []
        except error.URLError as e:
            print(e.reason)
            return []
        else:
            print('请求成功，开始爬取...')

        # Extract every URL from the fetched page.
        html_str = response.read().decode('utf-8')
        return self.beautifulSoup(html_str)

    # 使用BeautifulSoup进行页面内容解析，获取页面url列表
    def beautifulSoup(self, html_str):
        """Parse ``html_str`` and collect hrefs containing ``self.open_url``.

        Matching links are appended to ``self.url_lst``; the de-duplicated
        list is printed and returned.
        """
        soup = BeautifulSoup(html_str, 'html.parser')
        # Every anchor element on the page.
        links = soup.find_all('a')

        for item in links:
            url = item.get('href')
            # href may be absent (None); only strings can be searched.
            # BUGFIX: the original also let int/list through, which would
            # crash on ``url.find`` below.
            if not isinstance(url, str):
                continue
            # Keep only links that mention the configured domain/substring.
            if url.find(self.open_url) == -1:
                continue
            self.url_lst.append(url)

        # De-duplicate the accumulated list, preserving insertion order.
        self.url_lst = self.listRemoval(self.url_lst)

        print(self.url_lst)

        return self.url_lst

    # 列表去重复
    def listRemoval(self, listData = None):
        """Return ``listData`` with duplicates removed, order preserved."""
        # BUGFIX: the original used a shared mutable default argument.
        if listData is None:
            listData = []
        # dict preserves insertion order (Python 3.7+), giving O(n) dedup.
        return list(dict.fromkeys(listData))


def main():
    """Configure and run a sample crawl of shumeipai.nxez.com."""
    spider = Spider()
    spider.timeout = 2
    spider.url = "http://shumeipai.nxez.com"
    spider.open_url = "nxez.com"
    spider.count = 4
    spider.startSpider()


if __name__ == "__main__":
    # BUGFIX: guard the entry point so importing this module does not
    # immediately start a live network crawl.
    main()