#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id$
"""
sidb.py.
Abstract data access wrapper.
"""
import sys
sys.path.append('..')
from siglobals import log, strip_url

import os.path
from cPickle import dump, load, HIGHEST_PROTOCOL

class Page(object):
	"""
	One tracked page (a verse or a grouping page) of an author.

	Instances are pickled as part of GenericDB; underscore-prefixed
	attributes and the transient `upd` flag are excluded from the
	pickled state (see __getstate__/__setstate__).
	"""
	def __init__(self, **kwargs):
		"""
		Optional keyword args (all defaulted): title, last_modified,
		size, timestamp, csv_size, subpath, md5, issub, check_lastmod,
		check_csv, disabled.
		"""
		self.title = kwargs.pop('title', u'')
		self.last_modified = kwargs.pop('last_modified', 0) # from http
		self.size = kwargs.pop('size', 0)	# from http
		self.timestamp = kwargs.pop('timestamp', 0) # from 4lib_news
		self.csv_size = kwargs.pop('csv_size', '') # from 4lib_news
		self.subpath = kwargs.pop('subpath', '') # store subpath for groups
		self.md5 = kwargs.pop('md5', '') # crc for compare
		self.issub = kwargs.pop('issub', False) # page is grouping page, not verse
		self.check_lastmod = kwargs.pop('check_lastmod', True)
		self.check_csv = kwargs.pop('check_csv', True)
		self.disabled = kwargs.pop('disabled', False)

		# NB! transient "needs update" marker - excluded from load/store
		self.upd = False

	def __getstate__(self):
		"""
		Return the picklable state: every attribute except private
		(underscore-prefixed) ones and the transient `upd` flag.
		"""
		# Build the filtered dict in one pass instead of copy-then-delete;
		# unlike the old `del odict['upd']` this cannot raise KeyError.
		return dict((k, v) for k, v in self.__dict__.items()
					if not k.startswith('_') and k != 'upd')

	def __setstate__(self, state):
		# Parameter renamed from `dict` - it shadowed the builtin.
		self.__dict__.update(state)
		self.upd = False	# restore the flag dropped by __getstate__

	def __unicode__(self):
		return u"<Page: %s>" % (unicode(self.title, 'utf-8') if isinstance(self.title, str) else self.title,)

	def __str__(self):
		return self.__unicode__().encode('utf-8')

class Author(object):
	"""
	One tracked author: name, scan settings and the pages dict
	({url: Page}) belonging to the author.
	"""
	def __init__(self, **kwargs):
		"""
		Optional args: name='', last_modified=0, allow_import=True, allow_recurse=False,
					check_csv=True, check_lastmod=True, pages=list(Page)
		"""
		self.name = kwargs.pop('name', u'')
		self.last_modified = kwargs.pop('last_modified', 0)
		self.allow_import = kwargs.pop('allow_import', True)
		self.allow_recurse = kwargs.pop('allow_recurse', False) #FIXME: not used - exclude
		self.check_csv = kwargs.pop('check_csv', True)
		self.check_lastmod = kwargs.pop('check_lastmod', False)
		self.pages = kwargs.pop('pages', {})

		# NB! transient flag, excluded from load/store
		self.upd = False
		# page._new is set by add_page and must not be saved either

	def add_page(self, url, page=None):
		"""
		Register `page` under `url` (stored lowercased) unless one
		already exists there.
		returns: the Page stored for the url (existing or new),
		or None when url is empty
		"""
		if not url:
			return None
		if not page:
			# inherit this author's scan settings for the fresh page
			page = Page(check_csv=self.check_csv, check_lastmod=self.check_lastmod)
		if not isinstance(page, Page):
			raise TypeError('Not allowed type as page: %s' % (type(page),))

		key = url.lower()
		existing = self.pages.get(key, None)
		if existing:
			return existing

		self.pages[key] = page
		page.upd = True
		page._new = True	# freshly-added marker, never persisted
		return page

	def __unicode__(self):
		shown = unicode(self.name, 'utf-8') if isinstance(self.name, str) else self.name
		return u"<Author: %s>" % (shown,)

	def __str__(self):
		return self.__unicode__().encode('utf-8')

class NewsItem(object):
	"""
	One news record: an author/page pair plus a change flag.

	`author` and `page` are stored as (url, value) tuples - see __init__.
	"""
	# Change flags:
	NEW = 1
	UPD = 2
	DEL = 3
	def __init__(self, author, author_url, page, page_url, flag):
		self.author = (author_url, author)	# (url, author name)
		self.page = (page_url, page)	# (url, page title)
		self.flag = flag	# one of NEW / UPD / DEL
		self.size = 0
		self.size_diff = 0
		self.local_name = ''
		self.diff_name = ''
		self.scan_date = 0
		self.group_title = ''

	@staticmethod
	def str2flag(s):
		"""Map 'new'/'upd'/'del' (any case) to the numeric flag; 0 on error."""
		s = s.lower()
		if s == 'new': return NewsItem.NEW
		elif s == 'upd': return NewsItem.UPD
		elif s == 'del': return NewsItem.DEL
		return 0 # error

	@property
	def flagstr(self):
		"""Human-readable flag name, or '?<n>?' for an unknown flag value."""
		return ('NEW','UPD','DEL')[self.flag-1] if self.flag in (self.NEW,self.UPD,self.DEL) else '?%s?' % (self.flag,)

	def __unicode__(self):
		# BUG FIX: author/page are (url, value) tuples set in __init__ - the
		# old code read self.author.name / self.page.title and raised
		# AttributeError. Also added the closing '>' for consistency with
		# Page.__unicode__ / Author.__unicode__.
		return u"<NewsItem [%s] %s: %s>" % (self.flagstr, self.author[1], self.page[1])

	def __str__(self):
		return self.__unicode__().encode('utf-8')

class GenericDB(object):
	"""
	Default data access wrapper.
	data format:
	{
		base_url: Author()
	}
	Author.pages:
	{
		url:Page()
	}
	csv_timestamp
	libnews_timestamp

	Author keys are lowercased base urls ending with '/' (see add_author /
	add_page); page keys are the sub-urls within an author.
	"""
	def __init__(self):
		self.data = {} # {author_base_url: Author()}
		self.news = [] # NewsItem
		self.csv_timestamp = 0
		self.csv_lastmod = 0

	def base_urls(self):
		"""
		Get root urls for all authors
		"""
		# returns a fresh list, callers may mutate it freely
		return [x for x in self.data.keys()]

	def file_urls(self):
		"""
		Get the page urls of every author, flattened into one list.
		"""
		z=[]
		# map() is used purely for its side effect of extending z
		map(lambda q:z.extend((x for x in q.keys())), (x.pages for x in self.data.values()))
		return z

	def all_urls(self):
		"""
		All known urls: page urls followed by author base urls.
		"""
		x = self.file_urls()
		x.extend(self.base_urls())
		return x

	def find_author(self, url):
		"""
		Find Author by url or Page instance
		Returns the Author, or None when not found; on unexpected error
		logs the exception and falls through (implicitly returns None).
		"""
		if not url: return None
		try:
			if isinstance(url, Page):
				# reverse lookup: the author whose pages contain this instance
				au = [x for x in self.data.values() if url in x.pages.values()]
				if au: return au[0]
				return None
			# by string url
			s = str(url).strip()
			if not s: return None
			# trailing slash => the url is already an author base url
			if s[-1]=='/': return self.data.get(s, None)
			# otherwise drop the last path component to get the base url
			return self.data.get(s.rsplit('/',1)[0]+'/')
		except:
			# NOTE(review): bare except - swallows everything, incl. KeyboardInterrupt
			log.exception('Find author.')

	def find_author_page(self, url):
		"""
		Find author and page from url
		Returns (Author, Page); either element may be None when not found.
		"""
		if not url: return None, None
		s = str(url).strip()
		if not s: return None, None
		# trailing slash means the url names an author, not a page
		if s[-1]=='/': return self.data.get(s, None), None
		s1,s2=s.rsplit('/',1)
		au = self.data.get(s1+'/')
		if not au: return None, None
		# pages may be keyed by bare name or by full sub-path - try both
		pg = au.pages.get(s2, None)
		if not pg:
			pg = au.pages.get('%s/%s' % (s1,s2), None)
		return au, pg

	def url_by_author(self, author):
		"""
		Returns url for specified author instance if exists.
		Empty string when the author is unknown.
		"""
		try:
			# relies on keys() and values() enumerating in the same order,
			# which CPython guarantees while the dict is unmodified
			i = self.data.values().index(author)
			return self.data.keys()[i]
		except:
			return ''

	def url_by_page(self, page):
		"""
		Returns tuple (author_url, page_suburl) by Page
		(None, None) when `page` is not a Page or is not registered.
		"""
		if not isinstance(page, Page): return (None, None)
		try:
			# for each author holding this page, pair the author url with the
			# pages-dict key found via parallel keys()/values() ordering
			a = [(x, y.pages.keys()[y.pages.values().index(page)]) for x, y in self.data.items() if page in y.pages.values()]
			if a:
				return a[0]
		except:
			log.exception('url_by_page')

		return None, None


	def add_author(self, url, theauthor=None):
		"""
		Adds url and Author() into data dictionary.
		Extends data[author.url].pages by author pages if exists
		Returns resulting author
		"""
		if not theauthor: theauthor = Author()
		if not isinstance(theauthor, Author):
			raise TypeError('Not allowed type in add_author')

		# author keys are stored lowercased
		if url.lower() not in self.data:
			self.data[url.lower()] = theauthor
			return theauthor

		# author already known: merge in only the pages we don't have yet
		a = self.data[url.lower()]
		q = [(x.lower(),y) for x,y in theauthor.pages.iteritems() if x.lower() not in (z for z in a.pages.keys())]
		if q:
			a.pages.update(dict(q))

		return a

	def add_page(self, url, thepage=None, auto_author=False, author_url=None):
		"""
		Add page to author's dict.
		Auto add author if not exists (flag auto_author); set author's url
		to author_url if passed, else construct it from the page url.
		Returns the stored Page, or None when nothing was added.
		"""
		if not url: return
		if thepage and not isinstance(thepage, Page):
			raise TypeError('Not allowed type as page: %s' % (type(thepage),))

		# endswith / => url is author. add only author
		if url[-1] == '/':
			self.add_author(url, Author())
			return

		url = url.lower()
		# if no explicit author url => try to extract it from the page url
		if not author_url:
			author_url = '%s/' % (url.rsplit('/',1)[0],)

		# check validity of author url
		if not author_url.startswith('http:'):
			log.warning('add_page: invalid author url. "%s"' % (author_url,))
			return

		# does not add page if no author exist and no autoadd flag
		if not auto_author and author_url not in self.data.keys():
			log.debug('add_page: no author for "%s", no autoadd. Url not added.' % (url,))
			return

		# fetch (or create) the author, attach the page, (re)register author
		a = self.data.get(author_url, Author())
		thepage = a.add_page(strip_url(author_url, url), thepage)
		self.add_author(author_url, a)
		return thepage

	def remove_author(self, url_or_author):
		"""
		Remove author and all pages from database.
		Return: True on success, False if author or url not found
		(falls through with None - falsy - after logging an unexpected error).
		"""
		if not url_or_author: return False
		try:
			if isinstance(url_or_author, Author):
				u = self.url_by_author(url_or_author)
				self.data.pop(u)
				#del u
				return True
			u = self.find_author(url_or_author)
			if u:
				# resolve the instance back to its dict key, then drop it
				u = self.url_by_author(u)
				self.data.pop(u)
				#del u
				return True
			log.debug("Can't find author for %s" % (url_or_author,) )
			return False
		except:
			log.exception('remove_author exception.')

	def remove_page(self, url_or_page):
		"""
		Remove page from db
		Return: True on success, False when not found (falls through with
		None - falsy - after logging an unexpected error).
		"""
		if not url_or_page: return False
		try:
			if isinstance(url_or_page, Page):
				au, pu = self.url_by_page(url_or_page)
				if au and pu:
					au = self.find_author(au)
					au.pages.pop(pu)
					return True
				return False

			au, pg = self.find_author_page(url_or_page)
			if au and pg:
				# pg is rebound to the author url here; pu is the pages key
				pg, pu = self.url_by_page(pg)
				au.pages.pop(pu)
				return True
			elif au:
				log.debug(u"No page found for url %s. Author: %s." % (url_or_page, au.name))
				return False
		except:
			log.exception('remove_page exception.')

	def store(self, file):
		"""
		Generic store. Using pickle as driver
		`file` is an open binary-writable file object.
		"""
		_data = (self.data, self.csv_timestamp, self.csv_lastmod)
		dump(_data, file, HIGHEST_PROTOCOL)

	def load(self, file):
		"""
		Generic load. Using pickle as driver
		NOTE(review): unpickling is only safe on trusted files.
		"""
		_data = load(file)
		self.data = _data[0]
		self.csv_timestamp = _data[1]
		self.csv_lastmod = _data[2]

	def store_news(self, file):
		"""
		Generic store news list using pickle as driver
		"""
		dump(self.news, file, HIGHEST_PROTOCOL)

	def load_news(self, file):
		"""
		Generic load. using pickle
		"""
		self.news = load(file)


class LibNewsItem(object):
	"""
	Wrapper class for one line of the 4lib_news feed.
	The line is parsed at construction time or by calling L{parse}.
	"""
	def __init__(self, line=None, separator=u'\t'):
		"""
		@type line: unicode
		@param line: a line from 4lib_news.
		@type separator: unicode
		@param separator: CSV field separator.
		"""
		self.parse(line, separator)

	def parse(self, line, separator=u'\t'):
		"""
		Split a .CSV line into its component fields.
		Line format (separator - tab):
			url,title,author,size_k,timestamp,genre,
		@type line: str
		@param line: separator-delimited string
		"""
		# make all fields
		self._url, self._title, self._author = None, None, None
		self._size, self._genre = None, None
		self._tstamp = 0
		if not line or not separator: return
		try:
			# strip leading/trailing separators, then split; fields missing
			# at the end fall back to empty string / 0 below
			x = unicode(line).strip(separator).split(separator)
			i = len(x)
			self._url = x[0]
			self._title = x[1].strip() if i > 1 else u""
			self._author = x[2].strip() if i > 2 else u""
			self._size = x[3] if i > 3 else u""
			self._tstamp = int(x[4]) if i > 4 else 0
			self._genre = x[5].strip() if i > 5 else u""
		except:
			log.exception('LibNewsItem::parse fail, (incomplete?) string: "%s"' % (line,))

	def render(self, separator=u'\t'):
		"""
		Glues the fields back together in the same format as 4lib_news.

		>>> x=LibNewsItem()
		>>> print x
		<LibNewsItem>: None None(None), 0
		>>> x.parse(u"http://aaa.bb.cc/u/file.htm;The title;Author;123k;111223334;Genre;", u";")
		>>> print x
		<LibNewsItem>: http://aaa.bb.cc/u/file.htm The title(Author), 111223334

		"""
		return separator.join((self.url, self.title, self.author, \
				self.size_k, u'%d' % (self.timestamp,), \
				self.genre if self.genre else u'', u''))

	# Read-only accessors over the parsed fields.
	@property
	def url(self): return self._url

	@property
	def title(self): return self._title

	@property
	def author(self): return self._author

	@property
	def size_k(self): return self._size

	@property
	def genre(self): return self._genre

	@property
	def timestamp(self): return self._tstamp

	def __unicode__(self):
		return u"<%s>: %s %s(%s), %d" % (self.__class__.__name__, self.url, self.title, self.author, self.timestamp)

	def __str__(self):
		return str(self.__unicode__().encode('utf-8'))

def _main():
	import logging
	log.addHandler(logging.StreamHandler())
	log.setLevel(logging.DEBUG)

	import doctest
	doctest.testmod()


	b = GenericDB()
	pg1 = Page(title='Title 1 of author 1')
	a1 = Author(name='author 1', pages={'href_p1_a1':pg1})
	a2 = Author(name='author 1', pages={'href_p1_a1':pg1})

	a1.pages['href_p2_a1'] = Page(title='Title 2 of author 1')

	b.add_author('href_1',a1)

	a2.pages['href_test_add'] = Page(title='title 3 for test add')

	print '%s' % (b.data,)
	print b.file_urls()

	b.add_author('href_1',a2)

	print '%s' % (b.data,)
	print b.all_urls()


	b.add_page('http://aaa.bb.cc/d/dd-ddd-dddddd/pagename.html', Page(), True)
	p = b.add_page('http://aaa.bb.cc/d/dd-ddd-dddddd/page-for-find.htm', auto_author=True)
	b.add_page('http://aaa.bb.cc/d/dd-ddd-dddddd/pagename2.html', Page(), True)
	b.add_page('http://aaa.bb.cc/d/dd-ddd-dddddd/pagename3.html', Page(), True, 'http://samizdat.ru/')

	a = b.find_author(p)
	x=b.url_by_page(p)
	print a, '-->', b.find_author('http://aaa.bb.cc/d/dd-ddd-dddddd/pagename.html'), ':', x

	print b.remove_page(p)
	b.add_page('%s%s' % x, p)

	print b.find_author_page('%s%s' % x)
	b.remove_page('%s%s' % x)
	print b.find_author_page('%s%s' % x)
	print b.find_author('%s%s' % x)
	print b.remove_author('%s%s' % x)
	print b.find_author('%s%s' % x)


	#b.store(open('test_file.cfg', 'wb'))

# Script entry point.
if __name__ == "__main__":
	_main()
