#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: sixml.py 15 2009-07-05 18:50:54Z av1024 $
import sys
sys.path.append('..')
from sidb import *
from siglobals import log

from time import localtime, strftime, strptime, mktime
from xml.etree import cElementTree as ET

def intdef(astr, default=0):
	"""Convert *astr* to int, returning *default* when conversion fails.

	Used throughout as a lenient parser for optional XML attributes.
	Only conversion errors are absorbed (a bare ``except`` here would
	also hide KeyboardInterrupt/SystemExit).
	"""
	try:
		return int(astr)
	except (TypeError, ValueError):
		return default

def unc(s):
	"""Return *s* as unicode; byte strings are decoded as UTF-8."""
	return s if isinstance(s, unicode) else unicode(s, 'utf-8')

class XMLdb(GenericDB):
	"""Author/page database persisted as a flat XML file.

	On-disk layout is a single <samizdat> root holding one optional
	<csv_timestamp> element plus one <author> element per author, each
	with nested <page> children.  News items are (de)serialized to a
	separate file via store_news()/load_news().
	"""

	def __init__(self, filename):
		super(XMLdb, self).__init__()
		self.filename = filename
		# Eagerly load an existing database file; a missing file simply
		# leaves the database empty.
		if self.filename and os.path.exists(self.filename):
			log.debug("LOAD %s" % (self.filename,))
			self.load(open(self.filename, 'rb'))

	def __del__(self):
		"""
		Override destructor for save file.
		NB! Exception-handling does not work here!!! All errors are printed to stderr
		"""
		if self.filename:
			try:
				# Auto-save on destruction is intentionally disabled;
				# callers must invoke store() explicitly.
				#self.store(open(self.filename, 'wt'))
				pass
			except:
				pass

	def load(self, file):
		"""Populate self.data from *file* (path or open file object).

		Clears previously loaded data first.  Returns True on success,
		False on parse failure (the error is logged).
		"""
		try:
			xml = ET.parse(file)
			if not xml:
				return False
			root = xml.getroot()
			self.data.clear()
			self.csv_timestamp = 0
			# <csv_timestamp> is optional; malformed values are ignored.
			c = root.find('csv_timestamp')
			if c is not None:
				try:
					self.csv_timestamp = int(c.get('value','0'))
					self.csv_lastmod = int(c.get('last_mod','0'))
				except (TypeError, ValueError):
					pass

			for c in root.findall('author'):
				url = c.get('href',None)
				if not url:
					log.warning('load: <author> element without href skipped')
					continue

				a = Author(
					name=c.get('name',''), # always unicode
					last_modified=intdef(c.get('lm', 0)),
					# Flag attributes are stored as yes/no or 0/1 strings.
					allow_import=c.get('imp','yes').lower() in ('yes','true','1','on'),
					allow_recurse=c.get('rec','no').lower() in ('yes','true','1','on'),
					check_csv=c.get('csv','no').lower() in ('yes','true','1','on'),
					check_lastmod=c.get('chk','no').lower() in ('yes','true','1','on')
					)
				pg = {}
				for p in c.findall('page'):
					purl = p.get('href',None)
					if not purl:
						log.warning('load: <page> element without href skipped')
						continue
					pp = Page(
						title=p.get('title',''),
						last_modified=intdef(p.get('lm', 0)),
						size=intdef(p.get('hsize', 0)),
						timestamp=intdef(p.get('tstamp', 0)),
						csv_size=p.get('csize', ''),
						subpath=p.get('sub', ''),
						md5=p.get('md5', ''),
						issub=p.get('is_sub','no').lower() in ('yes','true','1','on'),
						check_lastmod=p.get('chk','no').lower() in ('yes','true','1','on'),
						check_csv=p.get('csv','yes').lower() in ('yes','true','1','on'),
						disabled=p.get('dis','no').lower() in ('yes','true','1','on')
					)
					pg[purl] = pp
				a.pages.update(pg)
				self.add_author(url, a)

			return True
		except Exception:
			log.exception('')
			return False


	def store(self, afile=None):
		"""Serialize the database to *afile* (writable file object).

		When *afile* is None, self.filename is opened and closed here.
		Returns True on success, False on error (logged).
		"""
		self_file = False
		if not afile and self.filename:
			afile = open(self.filename,'wb')
			self_file = True
		try:
			root = ET.Element('samizdat')
			root.text = '\n'	# manual pretty-printing via text/tail
			cattr = {
				'value': '%s' % (self.csv_timestamp,),
				'last_mod': '%s' % (self.csv_lastmod,)
			}
			c = ET.Element('csv_timestamp', cattr)
			c.tail = '\n'
			root.append(c)
			for aurl, aval in self.data.iteritems():
				attrs = {
					'href':aurl,
					'name': aval.name, # unicode; ET encodes on output
					'lm': '%d' % (aval.last_modified,),
					'imp': '%s' % (int(aval.allow_import),),
					'rec': '%s' % (int(aval.allow_recurse),),
					'csv': '%s' % (int(aval.check_csv),),
					'chk': '%s' % (int(aval.check_lastmod),),
				}
				au = ET.Element('author', attrs)
				au.text = '\n\t'
				au.tail = '\n'

				for purl, pval in aval.pages.iteritems():
					pattr = {
						'href':purl,
						'title':pval.title, # unicode; ET encodes on output
						'lm': '%d' % (pval.last_modified,),
						'hsize': '%d' % (pval.size,),
						'tstamp': '%d' % (pval.timestamp,),
						'csize': pval.csv_size, # string (?)
						'sub': pval.subpath,
						'md5': pval.md5,
						'is_sub': '%s' % (int(pval.issub),),
						'chk': '%s' % (int(pval.check_lastmod),),
						'csv': '%s' % (int(pval.check_csv),),
						'dis': '%s' % (int(pval.disabled),),
					}
					pg = ET.Element('page', pattr)
					pg.tail = '\n\t'
					au.append(pg)
				# Un-indent the closing </author> tag: the last page's
				# tail loses the trailing tab.  (Replaces the old
				# try/except around a possibly-unbound 'pg'.)
				if len(au):
					au[-1].tail = u'\n'

				root.append(au)

			print >> afile, ET.tostring(root, encoding='UTF-8')
		except Exception:
			log.exception('')
			return False
		finally:
			if self_file:
				afile.close()

		return True

	def store_news(self, file):
		"""Serialize self.news to *file* (path or writable file object).

		When *file* is a path, the existing file gets a one-level .bak
		rotation first.  Returns True on success, False on error.
		"""
		self_file = False
		if not file: return False
		if isinstance(file, (str, unicode)):
			# Best-effort backup rotation; failures are non-fatal.
			if os.access(file, os.W_OK):
				try:
					try:
						os.remove(file + '.bak')
					except OSError: pass
					os.rename(file, file + '.bak')
				except OSError:
					pass
			f = open(file,'wb')
			self_file = True
		else:
			# BUGFIX: a caller-supplied file object was previously
			# ignored -- 'f' stayed unbound and the write below raised
			# NameError.  The caller keeps ownership (no close here).
			f = file
		try:
			root = ET.Element('samizdat')
			root.text = '\n'
			for n in self.news:
				# (author, author url, page, page_url, flag)
				niattr = {
					'flag': n.flagstr,
					'title':n.page[1].title,
					'atitle': n.author[1].name,
					'ahref': n.author[0],
				}
				# Store page URLs relative to the author URL where
				# possible; keep one char so href starts with '/'
				# (load_news rebuilds via a1[:-1] + href).
				if n.page[0].startswith(n.author[0]):
					niattr['href'] = n.page[0][len(n.author[0])-1:]
				else:
					niattr['href'] = n.page[0]

				# Optional attributes are only emitted when set.
				if n.size: niattr['size'] = '%s' % (n.size,)
				if n.size_diff: niattr['size_diff'] = '%s' % (n.size_diff,)
				if n.local_name: niattr['local_name'] = n.local_name.replace('\\','/')
				if n.diff_name: niattr['diff_name'] = n.diff_name.replace('\\','/')
				if n.scan_date: niattr['date'] = strftime("%Y%m%dT%H:%M:00",localtime(n.scan_date))
				if n.group_title: niattr['gtitle'] = n.group_title
				ni = ET.Element('news', niattr)
				ni.tail = '\n'
				root.append(ni)

			print >> f, ET.tostring(root, encoding='UTF-8')
		except Exception:
			log.exception('')
			return False
		finally:
			if self_file:
				f.close()
		# BUGFIX: success was previously unreported (implicit None).
		return True

	def load_news(self, file):
		"""Append news items parsed from *file* to self.news.

		Items whose page URL is unknown to the database are skipped
		(logged at debug level).  Returns True on success, False on
		parse failure (logged).
		"""
		try:
			xml = ET.parse(file)
			if not xml:
				return False
			root = xml.getroot()
			for n in root.findall('news'):
				a1 = n.get('ahref','')
				p1 = n.get('href','')
				# Relative hrefs start with '/'; rebuild the absolute
				# URL against the author href (see store_news).
				if p1.startswith('/'): p1 = a1[:-1] + p1
				au, pg = self.find_author_page(p1)
				if au and pg:
					ni = NewsItem(au, self.url_by_author(au), pg, p1, NewsItem.str2flag(n.get('flag','upd')))
					# intdef() keeps a single malformed numeric
					# attribute from aborting the whole load
					# (consistent with load()).
					ni.size = intdef(n.get('size','0'))
					ni.size_diff = intdef(n.get('size_diff','0'))
					ni.local_name = n.get('local_name', u'')
					ni.diff_name = n.get('diff_name', u'')
					ni.scan_date = mktime(strptime(n.get('date','19700101T00:00:00'), '%Y%m%dT%H:%M:%S'))
					ni.group_title = n.get('gtitle',u'')
					self.news.append(ni)
				else:
					log.debug('Loadnews: author/page not found')

			return True
		except Exception:
			log.exception('')
			return False




if __name__=="__main__":
	import logging
	log.addHandler(logging.StreamHandler())
	log.setLevel(logging.DEBUG)

	import doctest
	doctest.testmod()


	b = XMLdb('./samizdat-db.xml')
	print "Timestamp:", b.csv_timestamp
	print "*** authors ***"
	for a in [x.name for x in b.data.values()]:
		print "'%s'" % (a,)
	print "***"
	b.store() #open(b.filename,'wb'))

	pg1 = Page(title='Title 1 of author 1')
	a1 = Author(name='author 1', pages={'href_p1_a1':pg1})
	a2 = Author(name='author 1', pages={'href_p1_a1':pg1})

	a1.pages['href_p2_a1'] = Page(title='Title 2 of author 1')

	b.add_author('href_1',a1)

	a2.pages['href_test_add'] = Page(title='title 3 for test add')

	print '%s' % (b.data,)
	print b.file_urls()

	b.add_author('href_1',a2)

	print '%s' % (b.data,)
	print b.all_urls()

	b.add_page('http://aaa.bb.cc/d/dd-ddd-dddddd/pagename.html', Page(), True)
	b.add_page('http://aaa.bb.cc/d/dd-ddd-dddddd/pagename2.html', Page(), True)
	b.add_page('http://aaa.bb.cc/d/dd-ddd-dddddd/pagename3.html', Page(), True, 'http://samizdat.ru/')

	#b.store(open('test_file.cfg', 'wb'))
	#b.store(open(b.filename,'wb'))

	#import json
