﻿from bs4 import BeautifulSoup
#from db_helper import db_ebooks
from util_tool import safe_download_page

def get_title(s):
	"""Extract the book title from page html `s`.

	Returns a (title, subtitle) tuple: the <h1> text and, when an <h3> is
	present, its text (empty string otherwise). Returns None when the
	schema.org Book section markers are missing.
	"""
	key_begin = '<div itemscope itemtype="http://schema.org/Book">'
	pos_begin = s.find(key_begin)
	if pos_begin == -1:
		return None
	key_end = '<table width="1000" align="center" class="ebook_view">'
	pos_end = s.find(key_end, pos_begin + len(key_begin))
	if pos_end == -1:
		return None
	# Parse only the section between the two markers; use a new name
	# instead of shadowing the `s` parameter as the original did.
	section = BeautifulSoup(s[pos_begin + len(key_begin):pos_end], 'html.parser')
	title = section.h1.string
	# The <h3> subtitle is optional: plain truthiness instead of the
	# original's `not not` double negation.
	subtitle = section.h3.string if section.h3 else ''
	return (title, subtitle)
	
def get_description(s):
	"""Return the raw '<span itemprop="description">...' html from page `s`,
	or None when any expected marker is missing.

	The returned slice starts at the opening span tag and stops just before
	the following '<table width="100%">' marker.
	"""
	heading = '<h4>Book Description</h4>'
	pos_heading = s.find(heading)
	if pos_heading == -1:
		return None
	key_begin = '<span itemprop="description">'
	# Search from the heading onward: the original searched from the start
	# of the page, so an unrelated earlier description span could be
	# picked up even though the heading position was already known.
	pos_begin = s.find(key_begin, pos_heading + len(heading))
	if pos_begin == -1:
		return None
	key_end = '<table width="100%">'
	pos_end = s.find(key_end, pos_begin + len(key_begin))
	if pos_end == -1:
		return None
	return s[pos_begin:pos_end]
	
def get_tags(description):
	"""Parse the description html and return its tag links as a list of
	{'href', 'name'} dicts (name utf-8 encoded); empty list when there
	are no anchors inside the span.
	"""
	soup = BeautifulSoup(description, 'html.parser')
	anchors = soup.span.find_all('a')
	return [
		{'href': a['href'], 'name': a.string.encode('utf8')}
		for a in anchors
	]
	
def get_img_url(s):
	"""Return the cover-image 'src' url from page html `s`, or None when
	the expected markers are missing.
	"""
	key_table = '<table width="1000" align="center" class="ebook_view">'
	pos_table = s.find(key_table)
	if pos_table == -1:
		return None
	key_img = '<img src='
	# Search after the full table-open tag; the original advanced by
	# len('<img src=') from the table position, mixing up the two keys.
	pos_img = s.find(key_img, pos_table + len(key_table))
	if pos_img == -1:
		return None
	key_end = '<div class="google_view">'
	pos_end = s.find(key_end, pos_img + len(key_img))
	if pos_end == -1:
		# Guard missing in the original: pos_end of -1 silently produced
		# the slice s[pos_img:-1] on a malformed page.
		return None
	node = BeautifulSoup(s[pos_img:pos_end], 'html.parser')
	return node.img['src']
	
def get_info_node(s):
	"""Locate the book-info '<table width="100%">' section in page html `s`
	and return it parsed as a BeautifulSoup node, or None when the markers
	are missing or the layout is not one of the two expected shapes.

	Two layouts are handled: a flat info table, and one where (presumably
	the "Related Books" widget — inferred from the marker searched below)
	a single inner table is nested inside, so the first '</table>' closes
	the inner table and the true end is the next '</table>'.
	"""
	key_begin = '<table width="100%">'
	pos_begin = s.find(key_begin)
	if pos_begin == -1:
		return
	key_end = '</table>'
	# First closing tag after the table opens; may belong to a nested table.
	pos_end = s.find(key_end, pos_begin + len(key_begin))
	if pos_end == -1:
		return
	# No 'Related Books' text anywhere after the table start: assume the
	# flat layout, where the first '</table>' closes the info table.
	if -1 == s.find('Related Books', pos_begin):
		return BeautifulSoup(s[pos_begin:pos_end + len(key_end)], 'html.parser')
	# Nested layout: an inner '<table' must open before the first close...
	pos_table = s.find('<table', pos_begin + len(key_begin))
	if pos_table == -1 or pos_table > pos_end:
		return
	# ...and the '</table>' found above must be exactly the inner one.
	pos_table2 = s.find('</table>', pos_table)
	if pos_table2 == -1 or pos_table2 != pos_end:
		return
	# Extend to the next '</table>', which closes the outer info table.
	pos_end = s.find(key_end, pos_end+len(key_end))
	if pos_end == -1:
		return
	ss = s[pos_begin: pos_end + len(key_end)]
	return BeautifulSoup(ss, 'html.parser')
	
def get_publishers(info_node):
	"""Return the publisher links from the info table as a list of
	{'href', 'name'} dicts (name utf-8 encoded).

	Reads row 1, cell 1, whose <b> wraps the publisher anchors.
	"""
	rows = info_node.find_all('tr')
	cell = rows[1].find_all('td')[1]
	return [
		{'href': anchor['href'], 'name': anchor.string.encode('utf8')}
		for anchor in cell.b.find_all('a')
	]
	
def get_authors(info_node):
	"""Return the author links from the info table as a list of
	{'href', 'name'} dicts (name utf-8 encoded).

	Reads row 2, cell 1; the second <b> there wraps the author anchors.
	"""
	rows = info_node.find_all('tr')
	cell = rows[2].find_all('td')[1]
	bold = cell.find_all('b')[1]
	return [
		{'href': anchor['href'], 'name': anchor.string.encode('utf8')}
		for anchor in bold.find_all('a')
	]

#def get_authors(info_node):
#	tr_node = info_node.find_all('tr')[2]
#	td_node = tr_node.find_all('td')[1]
#	return td_node.b.string

def get_isbn(info_node):
	"""Return the bolded text of row 3, cell 1 (the ISBN field)."""
	row = info_node.find_all('tr')[3]
	return row.find_all('td')[1].b.string

def get_datePublished(info_node):
	"""Return the bolded text of row 4, cell 1 (the publication-year field)."""
	row = info_node.find_all('tr')[4]
	return row.find_all('td')[1].b.string
	
def get_pages(info_node):
	"""Return the bolded text of row 5, cell 1 (the page-count field)."""
	row = info_node.find_all('tr')[5]
	return row.find_all('td')[1].b.string
	
def get_language(info_node):
	"""Return the bolded text of row 6, cell 1 (the language field)."""
	row = info_node.find_all('tr')[6]
	return row.find_all('td')[1].b.string
	
def get_filesize(info_node):
	"""Return the bolded text of row 7, cell 1 (the file-size field)."""
	row = info_node.find_all('tr')[7]
	return row.find_all('td')[1].b.string
	
def get_fileformat(info_node):
	"""Return the bolded text of row 8, cell 1 (the file-format field)."""
	row = info_node.find_all('tr')[8]
	return row.find_all('td')[1].b.string
	
def get_down_url(info_node):
	"""Return the anchor href of row 10, cell 1 (the download link)."""
	row = info_node.find_all('tr')[10]
	return row.find_all('td')[1].a['href']
	
def get_buy_url(info_node):
	"""Return the anchor href of row 12, cell 1 (the purchase link)."""
	row = info_node.find_all('tr')[12]
	return row.find_all('td')[1].a['href']
	
def get_read_url(info_node):
	"""Return the anchor href of row 14, cell 1 (the read-online link),
	or '' when the row or anchor is absent.

	Catches only the lookup errors a missing row/anchor can raise; the
	original bare `except:` swallowed everything, including
	KeyboardInterrupt and SystemExit.
	"""
	try:
		tr_node = info_node.find_all('tr')[14]
		td_node = tr_node.find_all('td')[1]
		return td_node.a['href']
	except (IndexError, AttributeError, KeyError, TypeError):
		return ''
	
def get_book_info(s):
	"""Parse a full it-ebooks book page html `s` into a dict with keys
	'tag', 'author', 'publisher' and 'info' (a tuple of encoded fields).

	Raises if mandatory sections are missing (mirroring the original
	behavior); only isbn defaults to '' when absent.
	"""
	titles = get_title(s)
	img_url = get_img_url(s)
	description = get_description(s)
	tags = get_tags(description)
	inode = get_info_node(s)
	# Some pages have no ISBN row text; keep the empty-string fallback.
	isbn = get_isbn(inode) or ''
	authors = get_authors(inode)
	publishers = get_publishers(inode)
	datepublished = int(get_datePublished(inode))
	pages = int(get_pages(inode))  # converted once; original re-wrapped int(pages) below
	language = get_language(inode)
	file_size = get_filesize(inode)
	file_format = get_fileformat(inode)
	down_url = get_down_url(inode)
	buy_url = get_buy_url(inode)
	read_url = get_read_url(inode)
	book_info = {}
	book_info['tag'] = tags
	book_info['author'] = authors
	book_info['publisher'] = publishers
	# Field order: title, subtitle, img url, isbn, year, pages, language,
	# file size, file format, download/buy/read urls, raw description html.
	book_info['info'] = (titles[0].encode('utf8'), titles[1].encode('utf8'),
		img_url.encode('utf8'), isbn.encode('utf8'), datepublished, pages,
		language.encode('utf8'), file_size.encode('utf8'),
		file_format.encode('utf8'), down_url.encode('utf8'),
		buy_url.encode('utf8'), read_url.encode('utf8'), description)
	return book_info

if __name__ == '__main__':
	# Smoke test against a sample book page.
	url = 'http://it-ebooks.info/book/960/'
	s = safe_download_page(url)
	book_info = get_book_info(s)
	# Call-style print: identical output under Python 2, and no longer a
	# SyntaxError under Python 3 (the original used `print str(...)`).
	print(str(book_info))
