#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: samizdat.py 16 2009-07-31 18:04:02Z av1024 $
#from __future__ import with_statement
import sys
import os, os.path
from urlparse import urlsplit, urlunsplit, urljoin
from locale import getpreferredencoding as enc
from hashlib import md5
import time
import logging

from siglobals import log, cfg, regexp, strip_url, print_progress as progress
from sicfg import init
import net
from db.sixml import XMLdb
from db.sidb import Author, Page, NewsItem, LibNewsItem

# Global proxy settings passed to every net.getfile()/load_4lib() call:
# an empty dict when --no-proxy was given (disables proxying), otherwise
# the configured proxy mapping or None (fall back to environment).
prox = cfg.get('proxy', None) if not cfg.get('no_proxy', False) else {}


def load_db():
	"""
	Open the file-based XML database at cfg['base'].

	Creates the parent directory of the database file when missing.
	Returns an XMLdb instance.
	"""
	# NB! file-based mode
	if not os.path.exists(cfg['base']):
		try:
			os.makedirs(os.path.dirname(cfg['base']))
		except OSError:
			# directory already exists (race) or is not creatable;
			# XMLdb() below will surface the real error if any
			pass
	db = XMLdb(cfg['base'])
	return db

def do_add(db, args):
	"""
	Add authors and/or pages for every item in args.

	Each item is either a readable file with one url per line (lines
	starting with '#', ';', '//' or ':' are skipped as comments) or a
	url itself.  cfg['add_author'] / cfg['add'] select what gets added.
	"""
	for item in args:
		if not os.access(item, os.R_OK):
			# plain url given directly on the command line
			log.debug('%s will be added.', item)
			progress(3, u"%s will be added" % (item,))
			if cfg.get('add_author', False):
				db.add_author(item, Author())
			elif cfg.get('add', False):
				db.add_page(item, Page(), auto_author=True)
			continue

		# readable file: read url list from it
		log.info('Add urls from file "%s"', item)
		progress(3, u'Add urls from file "%s"' % (item,))
		try:
			f = open(item, 'rt')
			try:
				urls = [ln.strip('\n\r') for ln in f.readlines() if ln.strip('\n\r') and not ln.startswith(('#',';','//',':'))]
			finally:
				f.close()

			for line in urls:
				line = line.strip()
				if not line:
					continue
				log.debug('%s will be added.', line)
				progress(3, u"%s will be added" % (line,))
				# NOTE(review): unlike the direct-url branch above, no
				# explicit Author()/Page() instance is passed here --
				# confirm the db API treats both call forms identically.
				if cfg.get('add_author', False):
					db.add_author(line)
				elif cfg.get('add', False):
					db.add_page(line, auto_author=True)
		except:
			log.exception('')



def do_import(url, author, db, txt, subpath=None):
	"""
	Import page links found in txt into the database.

	url: author's base url or page url,
	author: Author() instance, or None to look it up / auto-add by url,
	db: database instance,
	txt: unicode or list. if unicode passed - call extract_hrefs on it,
	subpath: optional subdirectory name stored on pages that have none.

	Newly discovered pages are added to their author and flagged
	upd/_new; a changed reported size on a known page sets its upd flag.
	"""
	if not url: return
	# check/autoadd author
	if not author:
		au = db.find_author(url)
		if not au: au = db.add_author(url)
	else: au = author
	if not au: return

	if isinstance(txt, list): hrefs = txt
	else: hrefs = net.extract_hrefs(txt, url) # href, title, size_k
	for hr in hrefs:
		h = net.normalize_url(url, hr[0])
		au1, pg = db.find_author_page(h)
		if not pg and au1:
			# link points to a known author but an unknown page: add it
			pg = au1.add_page(strip_url(db.url_by_author(au1), h))
			if pg:
				pg.upd = True
				pg._new = True
				log.debug(u'Add page "%s" for %s' % (strip_url(db.url_by_author(au1), h), url))
				progress(3, u'Add new page: %s' % (strip_url(db.url_by_author(au1), h),) )
			else:
				log.info(u"Add page ('%s') failed" % (strip_url(db.url_by_author(au1), h)))
				# FIX: original call lacked the level argument and had two
				# %s placeholders for a single value -> TypeError at runtime
				progress(3, u"Add new page failed ('%s')" % (strip_url(db.url_by_author(au1), h),))

		if pg and not pg.disabled:
			if not pg.title: pg.title = hr[1]
			if not hr[2] and not pg.csv_size:
				# no size reported and none known: treat as a sub-index page
				if not pg.issub:
					pg.issub = True
					pg.subpath = h.rsplit('/',1)[1].split('.',1)[0]
					pg.upd = True
					log.debug('\tSubpage %s.', h)

			if hr[2] and not pg.issub:
				if hr[2] != pg.csv_size:  # was `<>` (removed in python 3)
					pg.upd = True
					log.debug('Import: size changed %s -> %s', pg.csv_size, hr[2])
					pg.csv_size=hr[2]

			if subpath and not pg.subpath: pg.subpath = subpath

def import_author(url, au, db):
	"""
	Fetch and import an author's root page.

	url: page url, au: Author(), db: database object.
	Returns True on success or when the page is gone (404); False on
	a network I/O error.
	"""
	since = 0 if cfg['ignore_lastmod'] else au.last_modified
	# page[IOresult, content, last_modified/error_404, encoding]
	ret, text, date, aenc = net.getfile(url, since, prox)
	if not ret:
		if date == 404:
			# author deleted (?)
			#TODO: update news, delete author's pages from db
			log.debug('Author "%s" (%s) deleted.', au.name, url)
			progress(3, u"Page %s not found. Author '%s' may be deleted." % (url, au.name))
			return True
		return False # I/O error

	txt = unicode(text, aenc)
	# got a full body despite matching last_modified: the server does
	# not honour If-Modified-Since, stop sending it
	if len(text) > 0 and au.last_modified == date and cfg['if_modified_supported']:
		log.debug('Is-Modified-Since not supported. Disable.')
		cfg['if_modified_supported'] = False

	if au.last_modified != date or cfg['ignore_lastmod']:
		au.last_modified = date
		name = net.extract_author(txt)
		if name and not au.name:
			au.name = name
			au.upd = True

		do_import(url, au, db, txt)
	return True


def import_sub(url, au, pg, db, subpath):
	"""
	Fetch and import an author's sub-page (an index page below the root).

	url: page url, au: Author(), pg: Page(), db: database object,
	subpath: subdirectory name propagated to pages found on this sub-page.

	Returns True on success, when the page is unchanged, or when it was
	deleted (404: the page is dropped from the author and a DEL news
	item recorded); False on a network I/O error.
	"""
	# page[IOresult, content, last_modified/error_404, encoding]
	ret, txt, date, aenc = net.getfile(url, pg.last_modified if not cfg['ignore_lastmod'] else 0, prox)
	if ret:
		if txt:
			txt = unicode(txt, aenc)
			if pg.last_modified != date or cfg['ignore_lastmod']:
				pg.last_modified = date
				t = net.extract_author(txt)
				if t and not pg.title:
					# first time we learn this sub-page's title
					pg.title = t
					pg.upd = True
					#db.news.append(NewsItem(au, url, None, None, NewsItem.UPD))

				do_import(url, au, db, txt, subpath)
			return True

	else:
		if date==404:
			#FIXME: check encodings!
			log.debug('Page "%s: %s" (%s) deleted.', au.name, pg.title, url)
			progress(3, u'Page %s (%s: %s) deleted' % (url, au.name, pg.title))
			aurl=db.url_by_author(au)
			db.news.append(NewsItem(au, aurl, pg, strip_url(aurl, url), NewsItem.DEL))
			au.pages.pop(strip_url(aurl, url))
			return True
		else:
			return False # I/O error

	# fetch succeeded but the body was empty (not modified)
	return True

def check_csv(csv, db):
	"""
	Apply one item from the site .CSV news feed to the database.

	csv: LibNewsItem-like object (url, title, size_k, timestamp) or None,
	db: database object.

	Unknown authors and authors/pages excluded from csv checking are
	ignored.  New pages are auto-added (flagged _new); a changed size
	or timestamp sets the page's upd flag so it gets rescanned.
	"""
	if not csv: return
	au, pg = db.find_author_page(csv.url)
	if not au: return # author not in list
	if not au.check_csv: return # excluded from csv check
	if pg and (pg.disabled or not pg.check_csv): return # page excluded from check
	if not pg:
		pg = db.add_page(csv.url, auto_author=False)
		# FIX: guard before touching pg -- add_page() may return None and
		# the original set pg._new before the None check (AttributeError)
		if not pg: return # some error on add
		pg._new = True
		log.debug('CSV add page %s', csv.url)

	if not pg.title:
		pg.title = csv.title
		pg.upd = True

	# `!=` instead of the python-3-removed `<>` operator
	if (pg.csv_size != csv.size_k) or (pg.timestamp != csv.timestamp):
		log.debug('CSV new/update %s (%s->%s, %s->%s)', csv.url, pg.csv_size, csv.size_k, pg.timestamp, csv.timestamp)
		pg.csv_size = csv.size_k
		pg.timestamp = csv.timestamp
		pg.upd = True


def filter_verse(txt):
	"""
	Filter some html entities in text: turn &nbsp; into a real
	non-breaking space, expand tabs to 4 columns and strip closing
	</body>/</html> tags (lower- or upper-case only, as before).
	"""
	txt = txt.replace(u'&nbsp;', u'\u00A0').expandtabs(4)
	for closing_tag in (u'</body>', u'</html>', u'</BODY>', u'</HTML>'):
		txt = txt.replace(closing_tag, u'')

	return txt


def do_diff(text_store_enc, filename):
	"""
	do htmldiff on passed text and loaded file.

	text_store_enc: new page contents as a byte string in cfg['store_enc'],
	filename: path of the previously stored local copy.

	returns (Changed_bool, htmldiff_text_utf8).  When the old copy is
	unreadable or diffing fails, the page counts as changed with an
	empty diff text.
	"""
	import difflib

	if os.access(filename, os.R_OK):
		try:
			# simple charset detect: look for charset=... within the
			# first 512 bytes of the stored copy
			fo = open(filename, 'rb')
			try:
				old = fo.read()
			finally:
				fo.close()  # FIX: the file handle was never closed
			i = old.find('charset=', 0, 512)
			# FIX: local was named `enc`, shadowing the module-level
			# getpreferredencoding alias
			charset = cfg['store_enc']
			if i > 0:
				j = old.find('"', i, i+64)
				if j > 0:
					charset = old[i+8 : j]
				else:
					j = old.find('\'', i, i+64)
					if j > 0:
						charset = old[i+8 : j]
			if not charset: charset = cfg['store_enc']
			if charset.lower()=='utf8': charset = 'utf-8'
			try:
				# diff in cp1251: site content is Russian, keeps diff bytes narrow
				old = unicode(old, charset).encode('cp1251')

			except:
				log.error("Do_diff: can't convert saved file '%s' to unicode - wrong encoding (%s)" % (filename, charset))

			old = old.split('\n')

			if cfg['store_enc'].lower() not in ('cp1251', 'windows-1251'):
				txt = unicode(text_store_enc,cfg['store_enc']).encode('cp1251').split('\n')
			else:
				txt = text_store_enc.split('\n')
			di = difflib.HtmlDiff(tabsize=cfg['diff_tabsize'], wrapcolumn=cfg['diff_wrap_column'])
			_old = cfg['diff_head_old']
			if isinstance(_old, unicode): _old = _old.encode('cp1251')
			_new = cfg['diff_head_new']
			if isinstance(_new, unicode): _new = _new.encode('cp1251')
			s = di.make_table(txt, old, _new, _old, True, 0)
			# HtmlDiff emits this marker cell when both sides are equal
			if '<td></td><td>&nbsp;No Differences Found&nbsp;</td>' in s: return False, ''
			s=unicode(s,'cp1251').replace(u'&nbsp;',u'\u00A0').encode('utf-8')
			htm = '''<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<style type="text/css">
    table.diff {font-family:Courier; border:medium;}
    .diff_header {background-color:#e0e0e0}
    td.diff_header {text-align:right}
    .diff_next {background-color:#c0c0c0}
    .diff_add {background-color:#aaffaa}
    .diff_chg {background-color:#ffff77}
    .diff_sub {background-color:#ffaaaa}
</style>
</head><body>
%s
</body></html>''' % (s,)
			return True, htm
		except:
			log.exception('do_diff')
			return True, ''

	# no readable old copy: treat as changed, nothing to diff against
	return True, ''

def check_contents(aurl, page, purl, news, db):
	"""
	process checking with local files (download, clean, compare etc..)

	aurl: author's base url, page: Page() being checked, purl: page url
	relative to aurl, news: NewsItem for this page -- its flag is cleared
	when nothing changed, or set to UPD/DEL with diff/size details,
	db: database object.

	returns False on network I/O error, True otherwise
	"""
	au = db.find_author(aurl)
	if not au:
		log.debug('Author not found for %s', aurl)
		return True

	full_url = net.normalize_url(aurl, purl)
	# FIX: the condition here was inverted (`if cfg['ignore_lastmod']`);
	# every other net.getfile() call site passes last_modified only when
	# ignore_lastmod is NOT set.
	r, txt, lm, cs = net.getfile(full_url, page.last_modified if not cfg['ignore_lastmod'] else 0, prox)
	if r:
		pg = au.pages.get(purl)
		if not pg:
			log.debug('Invalid, but existing page "%s" for "%s"', full_url, au.name)
			return True

		if not txt: # empty - not changed
			news.flag = None
			return True

		verse = net.extract_body(unicode(txt,cs))
		if not verse:
			log.debug('Page %s is not verses page.', full_url)
			progress(3, u'Page %s is not verses page' % (full_url,))
			news.flag = None
			return True

		verse = filter_verse(verse)

		# make html page with verse
		title_text=u'%s. %s.' % (au.name, pg.title)
		# build the site's ".../comment/<path-without-extension>" url
		n_comm = full_url.find('/', full_url.find('//')+2)
		m_comm = full_url.rfind('.')
		comm_text = '%s/comment%s' % (full_url[:n_comm], full_url[n_comm:m_comm])
		href_text=u'<a href="%s">%s</a><br/><a href="%s">Комментарии</a></br>' % (full_url, title_text, comm_text)

		htm = (u"""<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=%(encoding)s" />
<title>%(title)s</title>
</head>
<body>
<div align="right"><small>%(href)s</small></div>
%(verse)s
</body></html>
""" % {'verse':verse, 'title':title_text, 'href':href_text, 'encoding':cfg['store_enc']}).encode(cfg['store_enc'])

		# md5 check
		md = md5(htm) # doesn't support unicode
		if pg.md5 == md.hexdigest(): # not changed
			news.flag = None
			log.debug('MD5: not changed %s (%s)', full_url, md.hexdigest())
			return True
		pg.md5 = md.hexdigest()

		if not cfg['store_local']:
			return True

		# prepare local filesystem
		href = urlsplit(full_url)
		path, file_name = href.path.rsplit('/',1) #NB! possible exception
		if path[0]=='/': path = path[1:]
		file_name = os.path.splitext(file_name)[0] + '.html'
		store_path = os.path.normpath(os.path.join(cfg['store_path'], path))
		if pg.subpath: store_path = os.path.normpath(os.path.join(store_path, pg.subpath))
		bak_path = os.path.normpath(os.path.join(store_path, cfg['bak_subpath']))
		dif_path = os.path.normpath(os.path.join(store_path, cfg['dif_subpath']))

		# prepare dir tree
		try:
			if not os.path.exists(store_path): os.makedirs(store_path)
		except OSError:
			log.exception('Create store path "%s" faied.', store_path)
			return True

		if cfg['use_diff']:
			chg, diff_htm = do_diff(htm, os.path.join(store_path, file_name))
			if not chg:
				news.flag = None
				log.debug('DIFF: no changes on %s vs %s', full_url, file_name)
				return True
			elif diff_htm and not cfg['dry']:
				try:
					if not os.path.exists(dif_path): os.makedirs(dif_path)
					dif_name = os.path.splitext(file_name)[0] + '.diff.htm'
					f = open(os.path.join(dif_path, dif_name), 'wb')
					try:
						f.write(diff_htm)
					finally:
						f.close()
					news.diff_name = os.path.join(dif_path, dif_name)
					news.flag = NewsItem.UPD
					log.debug('News set diff_name to: "%s"' % (news.diff_name,))
				except:
					log.exception('diff save.')

		if cfg['store_bak'] and os.access(os.path.join(store_path, file_name), os.R_OK):
			try:
				if not os.path.exists(bak_path): os.makedirs(bak_path)
				bak_name = os.path.splitext(file_name)[0] + '.bak'
				si = os.stat(os.path.join(store_path, file_name))
				nl = len(htm)
				if si.st_size and nl:
					sz_diff = nl - si.st_size
					news.size_diff = sz_diff
					news.flag = NewsItem.UPD
					log.debug('News set size_diff to: %s' % (news.size_diff,))
					# keep a plain .bak while the size change is small;
					# timestamp the backup name on a large grow/shrink
					gr = 1.0 + abs(cfg['grow_percent']/100.0)
					shr = 1.0 - abs(cfg['shrink_percent']/100.0)
					if (gr >= (1.0*nl / si.st_size) >= shr) or (abs(sz_diff) < 512):
						pass
					else:
						bak_name += '.%s' % (time.strftime('%Y%m%dT%H%M', time.localtime(si.st_mtime)))
					if not cfg['dry']:
						try: os.remove(os.path.join(bak_path, bak_name))
						except OSError: pass
						os.rename(os.path.join(store_path, file_name),os.path.join(bak_path, bak_name))
			except:
				log.exception('store .bak')

		# store file
		if not cfg['dry']:
			try:
				f = open(os.path.join(store_path, file_name),'wb')
				try:
					f.write(htm)
				finally:
					f.close()
				news.local_name = os.path.join(store_path, file_name)
				news.size = len(htm)
			except:
				log.exception('Save local copy fail %s --> %s', full_url, os.path.join(store_path, file_name))


	# --- if r ---
	else:
		if lm==404: # deleted
			log.debug('Deleted: %s', full_url)
			news.flag = NewsItem.DEL
			au.pages.pop(purl)
			#FIXME: auto-store
			return True
		else:
			return False

	return True

def process_db(db):
	"""
	Do all magic :)

	1. load the site .CSV news feed and flag changed pages,
	2. import new pages from author root and sub-index pages,
	3. rescan flagged pages and collect NewsItem records in db.news.

	Returns True on success, False on a fatal network or unexpected error.
	"""
	scan_date = time.time()
	try:
		if cfg['dry']:
			log.info('*** --dry-run specified. There is no modifications will be applied. ***')
			progress(3, '--dry run passed. No modifications will be made.')

		# 1. Load .CSV
		if not cfg['csv_disable']:
			log.debug('Load 4lib_news...')
			progress(level=1, msg=u"Get .CSV news")
			# (Result, [LibNewsItem], new_timestamp)
			res, csv, new_ts = net.load_4lib('http://%s/%s' % (cfg['site'], cfg['csv_news']), (db.csv_lastmod if not cfg['csv_reload'] else 0), prox)
			if res:
				if db.csv_timestamp or cfg['csv_reload']:
					csv = [x for x in csv if x.timestamp > db.csv_timestamp]

				if csv:
					ii = len(csv)
					i0 = 1
					for ci in csv:
						progress(level=2, msg=u"%s: %s" % (ci.author, ci.title), percent=i0*100/ii)
						check_csv(ci, db) # set upd flag on changed items
						i0 += 1

					# FIX: was `db.sv_timestamp` -- typo; the filter above
					# reads db.csv_timestamp, so already-seen items would
					# be reprocessed on every run.
					db.csv_timestamp = max([x.timestamp for x in csv])

				db.csv_lastmod = new_ts
			else:
				log.error("load .csv: network I/O fail.")
				progress(3, "Network I/O error loading %s" % (cfg['csv_news'],))
				if new_ts == 404: # site ok, no file
					pass
				else:
					return False

		progress(level=1, msg=u"")
		progress(level=2, msg=u"")


		ii = len(db.data)
		i0 = 1
		progress(level=1, msg=u"Process authors")
		for url, au in db.data.iteritems():
			progress(level=1, msg=u"Process: %s" % (au.name or url), percent=i0*100/ii)
			progress(level=3, msg=u"Process: %s" % (au.name or url))
			i0 += 1

			# 2. check root and issub pages if allow_import is set
			# import new pages
			if not cfg['csv_only'] and au.allow_import:
				log.info('%s (%d)', au.name.encode('utf-8'), len(au.pages))
				if not import_author(url, au, db):
					log.error("import_author: Network I/O fail.")
					progress(3, "Newtork I/O error")

				jj = len(au.pages)
				j0 = 1
				for uu, pp in [(x,y) for x,y  in au.pages.items() if y.issub and not y.disabled]:
					progress(level=2, msg=u"Import: %s" % (pp.title or uu), percent=j0*100/jj)
					j0 += 1
					# FIX: was y.subpath -- `y` leaked from the list
					# comprehension and always referred to the *last* page,
					# not the current one
					if not import_sub(net.normalize_url(url, uu), au, pp, db, pp.subpath):
						log.error("import_sub: Network I/O fail.")
						progress(3, "network I/O error.")
						return False

				if not cfg['dry']: db.store()

			# 3. Scan all files with 'upd' or check_lastmod set
			lsts=[(x,y) for x,y  in au.pages.items() if (y.upd or y.check_lastmod or hasattr(y, '_new')) and not (y.disabled or y.issub)]
			jj = len(lsts)
			j0 = 1
			for uu, pp in lsts:
				progress(level=2, msg=u"Scan: %s" % (pp.title or uu), percent=j0*100/jj)
				j0 += 1
				ni = NewsItem(au, url, pp, net.normalize_url(url, uu), NewsItem.UPD)
				ni.scan_date = scan_date
				# FIX: was getattr(y, ...) -- stale comprehension variable
				if getattr(pp, '_new', False): ni.flag = NewsItem.NEW
				# check time/contents
				try:
					# load file, compare md5, store/check local file(s) if allowed
					if not check_contents(url, pp, uu, ni, db):
						log.error("check_contents: Network I/O fail.")
						progress(3, "network I/O error.")

					# FIX: same stale `y` -> pp
					if getattr(pp, '_new', False): ni.flag = NewsItem.NEW
				except:
					log.exception('check_local_file')
				else:
					if ni.flag != None:
						if pp.subpath:
							# attach the title of the sub-index this page belongs to
							x = [(xx,yy) for xx,yy in au.pages.items() if yy.issub and yy.subpath==pp.subpath]
							if x:
								ni.group_title = x[0][1].title
						db.news.append(ni)

			#TODO: news: preload old news, add local/diff paths, size diff, make resulting page, delete processed items

		progress(1,u"done",100)
		progress(2,u"",100)

		return True

	except:
		log.exception('process_db:')
		return False



def main(argv):
	"""
	Entry point.  Parses options via init(), handles add/remove
	requests, runs the scan (process_db) unless --no-check, and stores
	the database, news and optional HTML news page.

	Returns the process exit code: 0 on success, 1 on failure.
	"""
	arg = init(argv[1:])
	if log.level <= logging.INFO:
		log.info("starts with: %s" % (' '.join(argv[1:])))

	if cfg.get('check_inet_only', False):
		if net.is_inet_available():
			return 0
		else:
			return 1

	db = load_db()
	db.load_news(cfg['news'])

	progress(3, "=== %s ===" % (time.strftime("%Y.%m.%d %H:%M:%S", time.localtime())))

	# FIX: explicit parentheses -- `and` binds tighter than `or`, so a
	# bare --add used to trigger this branch even with no arguments.
	if (cfg.get('add', False) or cfg.get('add_author', False)) and arg:
		do_add(db, arg)
		if not cfg['dry']: db.store()

	#TODO: test remove_url
	if cfg.get('remove_url', False):
		log.info(u"Try remove url '%s'", cfg['remove_url'])
		au, pg = db.find_author_page(cfg['remove_url'])
		if au and pg:
			if db.remove_page(pg):
				progress(3, u"The '%s' removed from '%s' (url: %s)" % (pg.title, au.name, cfg['remove_url'],))
			else:
				progress(3, u"Can't remove '%s' from '%s' (url: %s)" % (pg.title, au.name, cfg['remove_url'],))
		else:
			progress(3, u"Unable remove url '%s': author or page not found." % (cfg['remove_url'],))

	#TODO: test remove_author
	if cfg.get('remove_author', False):
		log.info(u"Try remove author by url '%s'", cfg['remove_author'])
		au = db.find_author(cfg['remove_author'])
		if au:
			if db.data.pop(db.url_by_author(au)):
				progress(3, u"The %s was removed (by url: %s)" % (au.name, cfg['remove_author'],))
			else:
				progress(3, u"Can't remove %s (by url: %s)" % (au.name, cfg['remove_author'],))
		else:
			progress(3, u"Can't remove author by url '%s': author not found." % (cfg['remove_author'],))
			log.info(u"Can't remove author by url '%s': author not found." % (cfg['remove_author'],))

	nret = True
	if not cfg['no_check']: nret = process_db(db)
	else: log.debug("--no-check passed. Scanning disabled.")

	if not cfg['dry']:
		db.store()
		db.store_news(cfg['news'])
		if cfg['store_htmnews']:
			try:
				import format_news
				f = open(cfg['news_htm'],'wb')
				try:
					format_news.news_htm(f, db.news)
				finally:
					f.close()
			except:
				log.exception('Save HTM news failed.')

	if nret: return 0
	return 1


if __name__=="__main__":
	#import cProfile as profile
	#profile.run('main()', sort=1)
	# exit status: 0 on success, 1 on failure (see main())
	sys.exit(main(sys.argv))
