from bs4 import BeautifulSoup
from Tools import getSoup


def getClassName(start_url):
    """Return the link text of the anchor on the page whose href equals start_url."""
    page = getSoup(start_url)
    anchor = page.find('a', href=start_url)
    return anchor.text
def getGroStartUrl(gro_url):
    """Build the category start URL (scheme//host/section/) from a group URL.

    e.g. 'https://host/cat/123.html' -> 'https://host/cat/'
    """
    pieces = gro_url.split('/')
    # pieces for an absolute URL: ['https:', '', 'host', 'section', ...]
    scheme, host, section = pieces[0], pieces[2], pieces[3]
    return '%s//%s/%s/' % (scheme, host, section)

def get_naxt_url(soup):
    """Extract the tail of the next-page link from a parsed page.

    Looks for the anchor whose text is '下一页' ("next page") and returns
    its href attribute; returns None when the anchor is absent, which
    means the current page is the last one.
    """
    link = soup.find('a', text='下一页')
    if link is None:  # last page: no next-page anchor present
        return None
    return link.get('href')

def getGroNameUrl(start_url):
    """Yield (name, url) pairs for every photo group under a category.

    Starts at the category page start_url and walks forward through the
    pagination (via get_naxt_url) until the last page is reached.
    """
    next_url = start_url
    while True:
        soup = getSoup(next_url)
        # The group listing lives inside the 'list-left public-box' <dl> block.
        listing = soup.find('dl', attrs={'class': 'list-left public-box'})
        # <dd> elements WITHOUT a class attribute are the group entries;
        # classed <dd>s are decoration/ads and are skipped.
        for item in listing.find_all('dd', class_=None):
            name = item.img.get('alt')
            url = item.a.get('href')
            yield name, url

        tail = get_naxt_url(soup)
        if tail is None:  # no next-page link -> reached the last page
            break
        next_url = start_url + tail  # join the next-page tail onto the base URL



def getPicNameUrl(gro_url, start_url):
    """Yield (name, url) pairs for each picture page of a photo group.

    gro_url is the first page of the group; start_url is the base used to
    join next-page tails (see get_naxt_url). Iterates until the last page.
    """
    next_url = gro_url
    while True:
        soup = getSoup(next_url)
        # The single <img> inside the 'content-pic' block carries both
        # the picture title (alt) and its source URL (src).
        pic = soup.find('div', class_='content-pic')
        name = pic.img.get('alt')
        url = pic.img.get('src')
        yield name, url

        tail = get_naxt_url(soup)
        if tail is None:  # no next-page link -> last picture reached
            break
        next_url = start_url + tail


def getGroName(gro_url):
    """Return the group title: the content image's alt text up to the first '('."""
    page = getSoup(gro_url)
    block = page.find('div', class_='content-pic')
    alt_text = block.img.get('alt')
    title, _, _ = alt_text.partition('(')
    return title