from bs4 import BeautifulSoup
from urllib import parse

"""头像页面解析器"""
class HeadPageParseer(object):
	"""Parser for avatar (head-image) listing pages.

	Works on a BeautifulSoup tree of a listing page and extracts, per entry,
	the actress detail URL, avatar image URL and display name, plus the
	pagination link to the next page. All parsing is best-effort: missing
	markup is logged and skipped rather than raised to the caller.
	"""

	def get_head_info(self, head_node):
		"""Extract one avatar entry from an ``div.item`` node.

		Returns a dict with keys ``actress_url``, ``img_url`` and ``name``.
		If the expected markup is absent the dict may be empty or partial
		(best-effort: collected up to the first failure).
		"""
		info = {}
		try:
			url_node = head_node.find('a', class_='avatar-box text-center')
			info['actress_url'] = url_node['href']
			img_node = head_node.find('div', class_='photo-frame').find('img')
			info['img_url'] = img_node['src']
			name_node = head_node.find('div', class_='photo-info').find('span')
			info['name'] = name_node.get_text()
		except (AttributeError, KeyError, TypeError) as e:
			# Missing node -> AttributeError/TypeError (None.find / None[...]),
			# missing attribute on a found tag -> KeyError. Log and return
			# whatever was collected so far.
			print(e)
		return info

	def get_head_datas(self, soup):
		"""Return a list of avatar-info dicts, one per ``div.item`` node.

		Returns None when *soup* is None; otherwise a (possibly empty) list.
		Per-entry parse errors are handled inside get_head_info.
		"""
		if soup is None:
			return None
		datas = []
		try:
			for node in soup.find_all('div', class_='item'):
				datas.append(self.get_head_info(node))
		except (AttributeError, TypeError) as e:
			# soup did not behave like a parsed document; keep partial results.
			print(e)
		return datas

	def get_next_page_url(self, soup, base_url):
		"""Resolve the pagination link (``a#next``) against *base_url*.

		Returns None when *soup* is None, '' when the next-page anchor is
		absent or lacks an href, and the absolute URL otherwise.
		"""
		if soup is None:
			return None
		try:
			next_page_node = soup.find('a', id='next')
			next_page_url = next_page_node['href']
		except (AttributeError, KeyError, TypeError) as e:
			# No next-page anchor (or no href on it): log and signal with ''.
			print(e)
			return ''
		return parse.urljoin(base_url, next_page_url)

	def get_head_page_info(self, response_text, base_url):
		"""Parse a full listing page.

		Returns None when *response_text* is None; otherwise the tuple
		(head_datas, next_page_url) as produced by get_head_datas and
		get_next_page_url.
		"""
		if response_text is None:
			return None
		# NOTE: requires the third-party 'lxml' parser to be installed.
		soup = BeautifulSoup(response_text, 'lxml')
		head_datas = self.get_head_datas(soup)
		next_page_url = self.get_next_page_url(soup, base_url)
		return head_datas, next_page_url
