from time import sleep
from helper.asynccall import asynccall
import helper
import re

from time import sleep
from bs4 import BeautifulSoup  # import the BeautifulSoup parser from the bs4 package

class Filter(object):
    """Scrape movie ids/titles from a paginated listing page.

    Follows pagination links recursively, capped at a fixed depth so the
    crawl cannot run away.
    """

    @asynccall
    def http_a(http, leve):
        """Fetch `http`, print each movie's rank and title, then recurse
        into every pagination link found on the page.

        Args:
            http: URL of the listing page to scrape.
            leve: current recursion depth; recursion only happens while
                  leve < 2, and any call at leve > 2 bails out immediately.

        Returns:
            None. Output is emitted via print() only.
        """
        # Depth guard — the original printed the warning but fell through
        # and scraped anyway; returning here enforces the stated limit.
        if leve > 2:
            print('['+str(leve)+']层级大于 3 级，退出')
            return
        print('['+str(leve)+']准备从 ['+http+'] 上获取所有的链接')
        html = helper.get_html(http)  # page source; assumed None on fetch failure
        if html is None:
            return
        # NOTE(review): selector suggests a Douban-style "article" container —
        # helper.get_certain_joke's exact contract isn't visible here.
        containers = helper.get_certain_joke(html, 'div[class="article"]')
        for div in containers:
            movie_items = div.find("ol", attrs={"class": "grid_view"}).find_all("li")
            for li in movie_items:
                # `rank` (was `id`, shadowing the builtin): the em-tag ranking number.
                rank = li.find("div", attrs={"class": "item"}).find("div", attrs={"class": "pic"}).find("em").text.strip()
                name = li.find("div", attrs={"class": "info"}).find("div", attrs={"class": "hd"}).find("span", attrs={"class": "title"}).text
                print('['+str(leve)+']第【'+str(rank)+'】个影片：'+name)
            # Pagination hrefs are relative, so they are appended to the base URL.
            pagelinks = div.find("div", attrs={"class": "paginator"}).find_all("a")
            for page in pagelinks:
                if leve < 2:
                    Filter.http_a(http + page["href"], leve + 1)
        print('['+str(leve)+']['+http+'] 上的链接获取完毕')
        return

