
from utils.commonUtil import get_bs4_obj 
import requests,os,re
from urllib.parse import quote,urlparse



def download_image(url, max_images=5):
    '''Download cover images of Turing books from the page at *url*.

    url: page whose <img> tags are scraped.
    max_images: how many images to fetch (the page has too many; the
        default of 5 keeps this a quick test run).
    Files are saved under the 'collectImgs' directory with a .jpg suffix.
    '''
    bs4 = get_bs4_obj(url)
    imgTags = bs4.findAll("img")
    # create the output directory up front so open() below cannot fail
    os.makedirs('collectImgs', exist_ok=True)
    # slicing (unlike range(0, 5) indexing) is safe when the page has
    # fewer than max_images <img> tags
    for img in imgTags[:max_images]:
        # Tag.attrs is a dict of all attribute->value pairs of the tag
        imgSrc = img.attrs['src']
        # last path segment of the URL becomes the file name;
        # no regex needed (the old "\/" was also an invalid string escape)
        name = imgSrc.rsplit("/", 1)[-1] + ".jpg"
        name = os.path.join('collectImgs', name)
        print("开始下载:", imgSrc)
        pic = requests.get(imgSrc)
        print("保存在:", name, 'pic', type(pic))
        with open(name, 'wb') as f:  # image bytes must be written in binary mode
            f.write(pic.content)


def get_jetli_movies(url):
    '''Print the cover-image src of each movie linked from the page at *url*.

    Scrapes every <a> whose href looks like an "/item/..." detail page and,
    when the link wraps an <img>, prints that image's src attribute.
    '''
    bs4 = get_bs4_obj(url)
    # raw string: "\/" in a plain string literal is an invalid escape
    # (DeprecationWarning); '/' needs no escaping in a regex anyway
    moviesLinks = bs4.findAll("a", {"href": re.compile(r"/item/.*")})
    # findAll already returns a list, so no extra list() wrapper is needed
    print("moviesLinks", len(moviesLinks))
    for link in moviesLinks:
        img = link.find("img")
        if img:
            print(img.attrs['src'])

# --- ad-hoc driver code: each experiment below was run by uncommenting it ---
url = "http://www.ituring.com.cn/book" 
#url = "http://file.ituring.com.cn/SmallCover/0100d482e1e61a3b3fe8"
#download_image(url)

url = "http://baike.baidu.com/link?url=GLFNrFfGDwq7cm5PmnLYMf5ngeXwtjoWRoC1E88utGSzOvPm0AvyA07UXuKleYfVnaEPoIQYgUxLGD8lSA3gZziVtWACv9DyGyxyWTdfwzo6U2MsZf7uBpIFgitN0UjS"
#get_jetli_movies(url)

# quote() percent-encodes the Chinese query text to avoid a URL-encoding error
url = "https://www.baidu.com/s?word="+quote("李连杰")
print(url)
bs4 = get_bs4_obj(url)

def filterTag(tag):
    """BeautifulSoup tag filter: True only for an <a> tag that contains
    an <em> whose text is exactly "李连杰"; False for everything else.
    Prints each tag name it inspects, and the matched tag's text."""
    print(tag.name)
    # guard clauses instead of nested ifs
    if tag.name != "a":
        return False
    emTag = tag.find("em", text="李连杰")
    if not emTag:
        return False
    print('tag.get_text', tag.get_text())
    return True

print(bs4)  # dump the whole parsed search-result page for inspection


def getInternaLinks(bsObj, includeUrl):
    '''Return the list of internal links of a site.

    bsObj: BeautifulSoup object of an already-fetched page.
    includeUrl: any URL on the target site; only its scheme + netloc is used.
    Returns a de-duplicated list of absolute internal URLs (site-relative
    hrefs are prefixed with the site root).
    '''
    parsed = urlparse(includeUrl)  # parse once instead of twice
    siteRoot = parsed.scheme + "://" + parsed.netloc
    # re.escape so '.' (and any '?' etc.) in the site root match literally
    hrefPattern = re.compile("^(/|.*" + re.escape(siteRoot) + ")")
    internaLinks = []
    seen = set()  # O(1) membership test instead of scanning the result list
    for link in bsObj.findAll("a", href=hrefPattern):
        href = link.attrs['href']
        if href is None:
            continue
        # Normalize relative hrefs to absolute BEFORE de-duplicating: the
        # old code compared the raw href against the already-prefixed list
        # entries, so the same relative link could be appended twice.
        absolute = siteRoot + href if href.startswith("/") else href
        if absolute not in seen:
            seen.add(absolute)
            internaLinks.append(absolute)
    return internaLinks

def getExculdeLinks(bsObj, exculdeUrl):
    '''Return all external (outbound) links found in *bsObj*.

    bsObj: BeautifulSoup object of an already-fetched page.
    exculdeUrl: a URL on the site itself; links containing its host are
        treated as internal and excluded.
    Returns a de-duplicated list of hrefs starting with http/https/www
    that do not belong to the site.
    '''
    # Only the host part matters for "is this our site?".  The original
    # pattern put the full URL in a lookahead right after the matched
    # scheme prefix — but exculdeUrl itself begins with "http", so the
    # lookahead almost always succeeded and internal links leaked into
    # the "external" result.  Filter on the netloc in Python instead.
    netloc = urlparse(exculdeUrl).netloc or exculdeUrl
    exculdeLinks = []
    for link in bsObj.findAll("a", href=re.compile(r"^(http|www|https)")):
        href = link.attrs['href']
        if href is None or netloc in href:
            continue
        if href not in exculdeLinks:
            exculdeLinks.append(href)
    return exculdeLinks

# --- driver: crawl 37.com and print its link lists ---
url = "http://www.37.com/"
#url = "http://game.37.com/play.php?game_id=417&sid=50012"
bs4 = get_bs4_obj(url)
links = getInternaLinks(bs4,url)
# NOTE(review): this immediately overwrites the internal-link result above,
# so only the external links are ever printed — confirm that is intended
links = getExculdeLinks(bs4,url)
print(links)
    