from bs4 import BeautifulSoup
from urllib import parse
import re

class ZuoPinPageParser(object):
	"""Parser for an actress works ("zuopin") listing page.

	All extraction methods are best-effort: parse failures are logged to
	stdout and a partial or empty result is returned instead of raising.
	"""

	# Result key -> label prefix of the corresponding <p> line inside the
	# photo-info box. Each line looks like "<label>: <value>".
	_PROFILE_FIELDS = (
		('shengri', '生日'),    # birthday
		('nianling', '年齡'),   # age
		('shengao', '身高'),    # height
		('zhaobei', '罩杯'),    # cup size
		('xiongwei', '胸圍'),   # bust
		('yaowei', '腰圍'),     # waist
		('tunwei', '臀圍'),     # hips
		('aihao', '愛好'),      # hobbies
	)

	def get_zuopin_page_info(self, response_text, base_url):
		"""Parse one listing page.

		:param response_text: raw HTML of the page, or None.
		:param base_url: URL the page was fetched from (used to resolve the
			relative next-page link).
		:returns: tuple (actresses_info, zuopin_infos, next_url), or None
			when response_text is None.
		"""
		if response_text is None:
			return None
		soup = BeautifulSoup(response_text, 'lxml')
		actresses_info = self.get_actresses_info(soup)
		zuopin_infos = self.get_zuopin_infos(soup)
		next_url = self.get_next_page_url(soup, base_url)
		return actresses_info, zuopin_infos, next_url

	def get_actresses_info(self, soup):
		"""Extract the actress profile from the avatar box.

		:returns: dict with 'head_img', 'name' and the profile fields listed
			in _PROFILE_FIELDS; may be partial (or empty) if the page layout
			does not match. None when soup is None.
		"""
		if soup is None:
			return None
		info = {}
		try:
			avatar_node = soup.find('div', class_='avatar-box')
			info['head_img'] = avatar_node.find('div', class_='photo-frame').find('img')['src']
			avatar_info_node = avatar_node.find('div', class_='photo-info')
			info['name'] = avatar_info_node.find('span').get_text()
			# Match each "<label>:" <p> line and keep the text after the colon.
			# NOTE: 'text=' is the legacy BeautifulSoup keyword (renamed to
			# 'string='); kept for compatibility with older bs4 versions.
			for key, label in self._PROFILE_FIELDS:
				node = avatar_info_node.find('p', text=re.compile(label + ':'))
				info[key] = node.get_text().split(':')[1].strip()
		except Exception as e:
			# Best-effort: keep whatever fields were parsed before the failure.
			print("crawl failed:%s" % (e))
		return info

	def get_zuopin_infos(self, soup):
		"""Collect all works (movie boxes) on the page.

		:returns: list of dicts with keys 'url', 'photo', 'fanhao'; empty (or
			partial) on parse failure. None when soup is None.
		"""
		if soup is None:
			return None
		zuopins = []
		try:
			for zuopin_node in soup.find_all('a', class_='movie-box'):
				zuopins.append({
					'url': zuopin_node['href'],
					'photo': zuopin_node.find('div', class_='photo-frame').find('img')['src'],
					# The site stores the product code in a custom <date> tag.
					'fanhao': zuopin_node.find('div', class_='photo-info').find('span').find('date').get_text().strip(),
				})
		except Exception as e:
			print("crawl failed:%s" % (e))
		return zuopins

	def get_next_page_url(self, soup, base_url):
		"""Resolve the absolute URL of the next listing page.

		:returns: absolute URL string; '' when there is no next-page link or
			parsing fails. None when soup is None.
		"""
		if soup is None:
			return None
		try:
			next_page_node = soup.find('a', id='next')
			# An AttributeError/TypeError here means there is no next page.
			return parse.urljoin(base_url, next_page_node['href'])
		except Exception as e:
			print(e)
			return ''
















