#!/usr/bin/env python
# -*- coding: utf8 -*-

from bs4 import BeautifulSoup
import urllib2
from urlparse import urljoin
import time
import sys

def urlopen(url, timeout, headers = None, refer=None):
	"""Fetch ``url`` over HTTP and return the raw response body.

	url     -- the address to fetch.
	timeout -- socket timeout in seconds, passed to urllib2.
	headers -- optional dict of extra request headers.
	refer   -- optional value for the Referer header.
	"""
	request = urllib2.Request(url)
	# Present a desktop-browser User-Agent so the site serves normal pages.
	request.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/37.0.2062.94 Chrome/37.0.2062.94 Safari/537.36')
	if refer:
		request.add_header('Referer', refer)
	for key, value in (headers or {}).iteritems():
		request.add_header(key, value)

	# todo: support percentage with content-length, and save block
	response = urllib2.urlopen(request, timeout=timeout)
	return response.read()

def get_title(soup):
	"""Return the text of the page's <title> element."""
	title_tag = soup.title
	return title_tag.string

def get_next_page_url(soup):
	"""Return the href of the '下一頁' (next page) link, or None on the last page.

	The site's hrefs are relative (e.g. '../../../read.php?...'); the
	leading '../../..' is stripped off before returning.
	"""
	links = soup.find_all('a', text='下一頁')
	if not links:
		return None
	href = links[0].get('href')
	return href.replace('../../..', '')

def get_authors(soup):
	"""Return one author name per post, as UTF-8 encoded byte strings.

	Each post's author sits in a <th class="r_two"> cell; the first
	non-blank string inside the cell is taken as the name.  Cells with
	no non-blank text contribute nothing.
	"""
	names = []
	for cell in soup.find_all('th', attrs={'class': 'r_two'}):
		first = next((s for s in cell.strings if s.strip()), None)
		if first is not None:
			names.append(first.encode('utf8'))
	return names

#def get_contents(soup):
#	contents = []
#	for content in soup.find_all('div', attrs={'class': 'tpc_content'}):
#		for string in content.strings:
#			#print string.encode('utf8')
#			contents.append(string.encode('utf8'))
#
#	return contents
#
#def get_page_content(soup, author):
#	authors = get_authors(soup)
#	contents = get_contents(soup)
#	return contents
#	print len(authors), len(contents)
#
#	slices = []
#	for a, c in zip(authors, contents):
#		#print a, author
#		if a == author:
#			#print "got ..."
#			slices.append(c)
#			
#	return slices

def get_page_content(soup, author):
	"""Return the text fragments (UTF-8 byte strings) of every post on
	this page written by ``author``.

	Posts live in <div class="tpc_content"> elements; they are paired
	positionally with the author list from get_authors(), and only the
	posts whose author matches are kept.
	"""
	contents = []
	posts = soup.find_all('div', attrs={'class': 'tpc_content'})
	for poster, post in zip(get_authors(soup), posts):
		if poster == author:
			contents.extend(s.encode('utf8') for s in post.strings)
	return contents

def getbook(url, author = None, fp = None):
	"""Download a whole forum thread into a text file.

	Fetches ``url``, writes the chosen author's post text to ``fp``,
	then recurses onto the next page until no '下一頁' link remains.

	url    -- page URL to fetch.
	author -- poster whose content is kept; defaults to the first author
	          found on the first page (presumably the thread starter).
	fp     -- open output file; on the first call one is created named
	          '<page title>.txt'.
	"""
	print 'get %s ...' % url
	# Retry forever on any fetch error, sleeping 3s between attempts.
	while True:
		try:
			html = urlopen(url, 30)
			break
		except Exception, msg:
			print 'Error: ', msg
			time.sleep(3)
		
	#print html
	# The site serves GBK-encoded pages.
	soup = BeautifulSoup(html,from_encoding="gbk")
	if not author:
		author = get_authors(soup)[0]
		#print author
	if not fp:
		title = get_title(soup)
		#print title
		name = title + '.txt'
		# Append mode: re-running extends an existing file rather than
		# truncating it.
		fp = open(name, 'ab')
	contents = get_page_content(soup, author)
	#print contents
	fp.write("\r\n".join(contents))
	fp.flush()

	next_url = get_next_page_url(soup)
	if next_url:
		# The next-page href is relative; resolve it against the current URL.
		next_url = urljoin(url, next_url)
		time.sleep(1)
		# Recurse onto the next page, passing the open file handle along.
		getbook(next_url, author, fp)
	else:
		fp.close()


if len(sys.argv) < 1:
	print "Usage: %s url [author]" % sys.argv[0]
	sys.exit(0)	

#url='http://t66y.com/read.php?tid=400762&page=1'
url = sys.argv[1]
author = None
if len(sys.argv) > 2:
	author = sys.argv[2]

# fix IncompleteRead exception
import httplib
httplib.HTTPConnection._http_vsn = 10
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'

getbook(url, author)



