#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id$
"""
net.py. HTTP network i/o wrappers
"""

import calendar
import time
import urllib2
from base64 import encodestring
from urlparse import urlsplit, urlunsplit, urljoin

from siglobals import log, cfg, regexp, strip_url
from db.sidb import NewsItem, LibNewsItem

class RequestEx(urllib2.Request):
	"""
	Request with custom method support.
	"""
	def set_method(self, value=None):
		self._met = value

	def get_method(self):
		if hasattr(self, '_met'):
			if self._met: return self._met
		return urllib2.Request.get_method(self)


def lastmod2time(lastmod):
	"""
	Convert an HTTP Last-Modified string (GMT) to a UTC timestamp int.

	Uses calendar.timegm: the parsed fields are GMT, so time.mktime
	(which interprets them as local time) would skew the stamp by the
	local UTC offset and make the value timezone-dependent.

	@type lastmod: str
	@param lastmod: e.g. "Wed, 25 Mar 2009 09:26:48 GMT"
	@rtype: int
	@return: UTC timestamp, or 0 on parse failure (logged)

	>>> lastmod2time("Wed, 25 Mar 2009 09:26:48 GMT")
	1237973208
	"""
	try:
		return int(calendar.timegm(time.strptime(lastmod, "%a, %d %b %Y %H:%M:%S %Z")))
	except Exception:
		log.exception('lastmod2time fail for "%s"' % (lastmod,))
		return 0

def time2lastmod(tstamp):
	"""
	Convert a float/int UTC timestamp to an HTTP Last-Modified string.

	Uses time.gmtime so the "GMT" label in the output is actually true;
	time.localtime would emit local wall-clock time mislabelled as GMT,
	producing wrong If-Modified-Since headers outside UTC.

	@type tstamp: int or float
	@param tstamp: UTC timestamp
	@rtype: str
	@return: e.g. "Wed, 25 Mar 2009 09:26:48 GMT";
			the epoch string on conversion error (logged)

	>> time2lastmod(1237973208)
	"Wed, 25 Mar 2009 09:26:48 GMT"
	"""
	try:
		return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(tstamp))
	except Exception:
		log.exception('time2lastmod fail for "%s"' % (tstamp,))
		return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(0))


def getfile(url, last_mod=None, proxies=None):
	"""
	Get file from site.
	Request HEAD before getting whole file.

	@type url: str
	@param url: full url to requested file
	@type last_mod: float
	@param last_mod: file's last-modified timestamp. If passed and equals to returned then file does not retrieved.
			Otherwise whole file returned if no error
	@type proxies: dict
	@param proxies: proxies override arg for urlopen. If not passed or == None then http_proxy env var used.

	@rtype: tuple(bool, str, float, str)
	@return: bool: No error, str: contents if downloaded, int: new/prev timestamp, str: encoding
	"""
	f = None
	try:
		r = RequestEx(url)
		if cfg['proxy_auth']:
			r.add_header('Proxy-Authorization', 'Basic %s' % (cfg['proxy_auth'],))

		if last_mod: r.add_header('If-Modified-Since', time2lastmod(last_mod))

		prox_handler = urllib2.ProxyHandler(proxies)
		op = urllib2.build_opener(prox_handler)

		# --- two-transaction mode if If-Modified-Since header is not supported ---
		if not cfg['if_modified_supported'] and last_mod:
			r.set_method('HEAD')
			try:
				f = op.open(r)
			except urllib2.HTTPError, e:
				if e.code == 304:
					return True, '', last_mod, ''
				else:
					raise

			if 'Last-Modified' in f.headers:
				if int(last_mod) == lastmod2time(f.headers['Last-Modified']):
					return True, '', last_mod, ''
			f.close()

		# --- default mode. Condition or force get page ---
		r.set_method() # reset to default GET/PUT
		try:
			f = op.open(r)
		except urllib2.HTTPError, e:
			if e.code == 304:
				return True, '', last_mod, ''
			else:
				raise
		lm = lastmod2time(f.headers.get('Last-Modified', time2lastmod(time.time())))
		if last_mod==lm and lm>0:
			return True, '', last_mod, ''
		'''
		ss = []
		s = f.readline()
		while s:
			ss.append(s.replace('\x00',''))
			s = f.readline()
		doc = ''.join(ss)
		'''
		doc = f.read()
		if doc: doc = doc.replace('\x00','')
		cset = f.headers.get('content-type', 'charset=%s' % (cfg['default_charset'],))
		if 'charset=' in cset: cset = cset[cset.index('charset=')+8:]
		else: cset = cfg['default_charset']
		f.close()
		return True, doc, lm, cset

	except urllib2.HTTPError, e:
		if e.code == 404:
			log.debug('File not found "%s"' % (url,))
			return False, 0, 404, '' # file deleted
	except Exception, e:
		log.exception("getfile fail: %s" % (e,))
		return False, '', 0, ''
	finally:
		if f: f.close()


def extract_hrefs(page, basepath):
	"""
	Extract (url, title, size) link tuples from a page.

	Links are matched with regexp['re_hrefs']; an optional size-in-KB tag
	between a link and the next one is matched with regexp['re_hrefs_kb'].
	Links are ignored when they have a foreign host, an https/ftp scheme,
	a relative '.'-path, a stop-listed name (cfg['stop_names']) or an
	absolute path outside *basepath*.

	@type page: basestring
	@param page: html page text
	@type basepath: str
	@param basepath: full url of the page the links came from
	@rtype: list of (str, str, str)
	@return: list of (full url, title, size) tuples
	"""
	hrefs = []
	if not basepath: return hrefs
	url = urlsplit((basepath or '').lower())
	if url.path.endswith('/'): # was url.path[-1]=='/': IndexError on empty path
		basepath = url.path
	else:
		basepath = url.path.rsplit('/',1)[0]

	r0 = regexp['re_hrefs']
	r1 = regexp['re_hrefs_kb']
	g = r0.search(page)
	while g:
		i0 = g.span()[1]
		ign = False

		s = (g.group('href').lower() or u'').strip('\'\" ')
		if not s:
			g = r0.search(page, i0)
			continue

		s = urlsplit(s)
		if not s.path or s.scheme in ('https','ftp'): ign = True
		if not ign and s.path[0] == '.': ign = True
		if not ign and s.netloc and url.netloc and (s.netloc != url.netloc): ign = True
		if not ign and s.path.endswith(tuple(cfg['stop_names'])): ign = True
		if not ign and s.path[0]=='/' and s.path.rsplit('/', 1)[0] != basepath[:-1]: ign = True

		if not ign:
			g2 = r0.search(page, i0)
			if g2:
				i1 = g2.span()[0]
			else:
				# was i1 = None: re's endpos must be an int, so the last
				# link on the page raised TypeError in r1.search below
				i1 = len(page)
			g1 = r1.search(page, i0, i1) # look for a KB size tag before the next link
			if g1: # found KB
				hrefs.append((urlunsplit(s), g.group('title'), g1.group('sz')))
				i0 = g1.span()[1]
			else:
				hrefs.append((urlunsplit(s), g.group('title'), ''))
			if g2:
				g = g2
				continue
		g = r0.search(page, i0)
	return hrefs

def extract_author(page):
	"""
	Extract the author name from a page using regexp['re_title'].

	@type page: basestring
	@param page: html page text
	@rtype: basestring
	@return: first captured group, or '' when not found or on error (logged)
	"""
	try:
		x = regexp['re_title'].search(page)
		if x:
			return x.groups(0)[0]
		return ''
	except Exception: # was a bare except:, which also swallows SystemExit etc.
		log.exception('Extract_author')
		return ''

def extract_body(page):
	"""
	Extract the body text from a page using regexp['re_body'].

	@type page: basestring
	@param page: html page text
	@rtype: basestring
	@return: first captured group, or '' when not found or on error (logged)
	"""
	try:
		x = regexp['re_body'].search(page)
		if x:
			return x.groups(0)[0]
		return ''
	except Exception: # was a bare except:
		log.exception('Extract_body') # was mislabelled 'Extract_author' (copy-paste)
		return ''


def normalize_url(base, part):
	"""
	Build a full url from a full *base* url and a full or partial *part* url.

	>>> normalize_url('http://mysite.com/p/some_path', '/p/some_path/my_page_1.htm')
	'http://mysite.com/p/some_path/my_page_1.htm'

	>>> normalize_url('http://mysite.com/p/some_path/some_htm_page.html', '/p/some_path/my_page_1.htm')
	'http://mysite.com/p/some_path/my_page_1.htm'
	"""
	b = urlsplit(base.lower())
	p = urlsplit(part.lower())
	# Keep part's path as-is when it is absolute, already contains the base
	# path, or the base has no path; otherwise graft it onto base's directory.
	if not b.path or (b.path in p.path) or p.path.startswith('/'):
		path = p.path
	elif b.path.endswith('/'):
		path = b.path + p.path
	else:
		path = b.path.rsplit('/', 1)[0] + '/' + p.path

	return urlunsplit((p.scheme or b.scheme, p.netloc or b.netloc, path, None, None))


def load_4lib(url, timestamp=None, proxy=None):
	"""
	Load and parse the 4lib_news index.

	4lib_news is a cp1251 tab-separated CSV:
	  href, title, author, size_k, timestamp, genre

	@type url: str
	@param url: full url of the 4lib_news file
	@type timestamp: float
	@param timestamp: previous last-modified stamp, passed through to getfile
	@type proxy: dict
	@param proxy: proxies override, passed through to getfile

	@rtype: tuple(bool, list or None, int)
	@return: (ok, [LibNewsItem] sorted by timestamp descending or None on
			error, new timestamp / error code (404 or 0))
	"""
	ok, body, tstamp, enc = getfile(url, timestamp, proxy)
	if not ok: return False, None, tstamp # error; opt. error code (404/0) in tstamp
	if not body: return True, [], tstamp # not modified: nothing new to parse
	# (the original's `else: s = txt[1]` branch was unreachable dead code)
	try:
		s = body.decode(enc)
		data = sorted(
			[y for y in (LibNewsItem(x) for x in s.split('\n') if x) if y and y.url],
			key=lambda z:z.timestamp,
			reverse=True
			)
		return True, data, tstamp
	except Exception: # was a bare except:
		log.exception('4lib_news parser.')
		return False, None, 0


def is_inet_available(url='http://zhurnal.lib.ru/', proxies=None):
	"""
	Check availability of library site.

	Try request HEAD from url to determine availability.

	@type url: str
	@param url: full url to requested file
	@type proxies: dict
	@param proxies: proxies override arg for urlopen. If not passed or == None then http_proxy env var used.

	@rtype: bool
	@return: *True* on success, *False* on failure
	"""
	try:
		r = RequestEx(url)
		if cfg['proxy_auth']:
			r.add_header('Proxy-Authorization', 'Basic %s' % (cfg['proxy_auth'],))

		prox_handler = urllib2.ProxyHandler(proxies)
		op = urllib2.build_opener(prox_handler)
		r.set_method('HEAD')

		f = op.open(r) # raises exception on error
		f.close()

		log.debug('I-net check: OK')
		return True
	except Exception, e:
		log.debug('I-net check: FAIL (%s)' % (e,))
		return False

# ======== INIT ========
#log = logging.getLogger('ZHCheck')


# ======== TEST ========
# Manual smoke test: hits the live site, so it needs network access.
if __name__ == "__main__":
	import logging
	log.addHandler(logging.StreamHandler())
	log.setLevel(logging.DEBUG)
	log.info("%s Unittests." % (__file__,))

	log.info("=====DOCTESTS=====")
	import doctest
	doctest.testmod()
	log.info("===END DOCTESTS===")

	log.info("getfile test")
	# NOTE(review): hardcoded base64 proxy credential committed to source —
	# should be loaded from local config/environment instead.
	cfg['proxy_auth'] = 'YS52b3JvbmluOkZsdkJoRmtfMzE='

	log.info('I-net check result: %s' % (is_inet_available(),))


	# Fetch and parse the news index, then dump it sorted by timestamp.
	x = load_4lib('http://zhurnal.lib.ru/4lib_news')

	if x[0]:
		log.info('save sorted 4lib_news')
		f = open('4lib_news.txt', 'wb')
		#print >> f, u"Timestamp\thref\ttitle\tauthor".encode('utf-8')
		for s in x[1]:
			#s0 = u'%s\t%s\t%s\t%s' % (s[0],s[1],s[2],s[3])
			print >> f, s.render().encode('utf-8')#s0.encode('utf-8')
		f.close()

	# Raw fetch of the same file, re-encoded to utf-8.
	f = getfile('http://zhurnal.lib.ru/4lib_news')
	if f[0]:
		log.info("save as 4lib_news.csv in utf-8")
		x = open('4lib_news.csv', 'wt')
		x.write(f[1].decode(f[3]).encode('utf-8'))
		x.close()

	log.info("Result: %s, %s bytes, last-modified: %s" % (f[0], len(f[1]), time2lastmod(f[2])))
	# Conditional-get paths: If-Modified-Since mode, then HEAD (two-transaction) mode.
	log.info("getfile with timestamp/if-modified=True")
	cfg['if_modified_supported'] = True
	f = getfile('http://zhurnal.lib.ru/4lib_news', f[2])
	log.info("Result: %s, %s bytes, last-modified: %s" % (f[0], len(f[1]), time2lastmod(f[2])))
	log.info('If-Modified-Since: '+('not supported', 'supported')[len(f[1])==0])
	cfg['if_modified_supported']=False
	f = getfile('http://zhurnal.lib.ru/4lib_news', f[2])
	log.info("Result: %s, %s bytes, last-modified: %s" % (f[0], len(f[1]), time2lastmod(f[2])))
	log.info('HEAD '+('worked', 'not worked')[len(f[1])!=0])

	# An ordinary author page fetched twice: the second request should be a no-op.
	cfg['if_modified_supported'] = True
	log.info('ordinal page get #1')
	f = getfile('http://zhurnal.lib.ru/p/pupkin_wasja_ibragimowich/')
	log.info('LastUpdate: %s' % time2lastmod(f[2]))
	log.info('get again')
	f = getfile('http://zhurnal.lib.ru/p/pupkin_wasja_ibragimowich/', f[2])
	log.info(('Not changed', 'changed')[len(f[1])!=0]+' size=%s' % (len(f[1]),))


	# LibNewsItem round-trip check on a sample tab-separated record.
	__a = LibNewsItem(u'http://aaa/bb/cc.htm\tНазвание\tАвтор И.О.\t123k\t1237992451\tFantasy\t')
	print __a
	print __a.render()
