#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: siglobals.py 15 2009-07-05 18:50:54Z av1024 $
"""
siglobals.py
Common variables and structtures.
"""

import logging
from re import I, U, S, M, L, X
import sys
import codecs
from locale import getpreferredencoding

# Wrap stdout so unicode strings are transparently encoded with the user's
# locale-preferred encoding on every write (Python 2 behavior).
sys.stdout = codecs.getwriter(getpreferredencoding())(sys.stdout)

log = logging.getLogger('ZHCheck') # common logging object shared by all modules

# NB! Only base types (int, bool, float, str, unicode) and list/tuple/dict of
#     base types are allowed here -- anything else will be saved as a
#     base64-encoded pickle.
cfg = {
	'proxy_auth' : None, # common proxy authentication base64(username:password) if present
	'store_auth': True, # remember proxy credentials between runs
	'no_proxy': False, # disable proxy usage
	'proxy': None, # proxy address, if any

	'site': 'zhurnal.lib.ru', # base site being mirrored
	'default_charset' : 'windows-1251', # default site encoding
	'if_modified_supported' : True, # HTTP header If-Modified-Since is supported

	'store_enc' : 'utf-8', # encoding used for locally stored texts
	'store_path' : './txt', # root directory for downloaded texts
	'bak_subpath' : 'bak',	#FIXME: cmdline switch
	'dif_subpath' : 'dif',	#FIXME: cmdline switch
	'log_file': './samizdat.log',
	'log_level': 'DEBUG',
	'progress': False, # print progress info
	'dry': False, # dry run: report only, change nothing

	'store_local' : True, # keep local copies of downloaded texts
	'store_bak' : True, # keep .bak copies of replaced files
	'use_diff' : True, # generate diffs between old and new versions
	'store_htmnews' : True, # write the HTML news file

	# diff rendering options
	'diff_tabsize': 4,
	'diff_wrap_column': 60,
	'diff_head_old':u"Старый", # diff column heading for the old version ("Old")
	'diff_head_new':u"Новый", # diff column heading for the new version ("New")
	'grow_percent':10, # file-size comparison thresholds (%) for the "smart" .bak logic
	'shrink_percent':5,

	'csv_news' : '4lib_news', # CSV news url name, w/o basic site
	#'csv_timestamp' : 0, # 4lib_news last update timestamp
	'csv_reload': False, # force reload 4lib_news
	'csv_disable': False, # disable 4lib_news usage

	# system filenames, excluded from parse/download
	'stop_names' : ['index.shtml', 'indexdate.shtml', 'indexvote.shtml', 'indextitle.shtml', 'about.shtml', 'stat.shtml',
					'rating.shtml', 'rating1.shtml','publish.shtml'],


	# database filename or name
	#FIXME: pathnames via cmdline, remove relative paths
	'base' : './data/database.xml',
	'news' : './data/news.xml',
	
	'news_htm' : './data/news.htm',

	# check options
	'ignore_lastmod': False, # ignore Last-Modified timestamps when checking
	'no_check': False, # skip checking altogether
	'csv_only':False, # check 4lib_news only
	'check_inet_only':False, # only check {site} availability, return 0 on success, 1 on failure


	# actual text start/stop marker sequences (kept for reference)
	#'text_start' : u'-- Собственно произведение --',
	#'text_stop' : u'<hr size=2 noshade>',

	# regexps are stored as uncompiled (pattern, flags) tuples
	're_title' : ('<title>.*?\.(?P<title>.+?)(?:(\..*)|(?:[$<]))', I | U), # regexp for author:title
	're_hrefs' : (u"\<a\s+.*?href=[\"|\']?(?P<href>\S*?)[\"|\']?(?:(?:\s*?)|(?:\s+.*?))\>(?:\<.+?\>)*(?P<title>.+?)(?:\</.+?\>)*?\</a\>", I | U), # regexp for extracting hrefs
	're_hrefs_kb': (u"(?P<sz>\d+k).*?", I | U), # regexp for the size-in-kilobytes marker
	're_body' : (u"<!-+?\sСобственно произведение\s-+>\s+(?P<txt>.*?)<!-+?>\s+<hr size=2 noshade>", I | U | S), # regexp extracting the actual work body
}

# Pristine default regexps, same (pattern, flags) tuple shape as the 're_*'
# entries in cfg; used as the fallback source when (re)building 'regexp' below.
__default_re = {
	're_title' : (r'<title>.*?\.(?P<title>.+?)((\..*)|([$<]))', I | U), # regexp for author:title
	're_hrefs' : (u"\<a\s+.*?href=[\"|\']?(?P<href>\S*?)[\"|\']?(?:(?:\s*?)|(?:\s+.*?))\>(?:\<.+?\>)*(?P<title>.+?)(?:\</.+?\>)*?\</a\>", I | U), # regexp for extracting hrefs
	're_hrefs_kb': (u"(?P<sz>\d+k).*?", I | U), # regexp for the size-in-kilobytes marker
	're_body' : (u"<!-+?\sСобственно произведение\s-+>\s+(?P<txt>.*?)<!-+?>\s+<hr size", I | U | S), # regexp extracting the actual work body
}

# Compiled regular expressions, populated at init time from the
# (pattern, flags) tuples above.
regexp = {
# after init it should look like: 're_title': re.compile(pattern, flags)
}






def strip_url(url, href):
	"""
	Strip path from url.
	Returns only the filename, or name with subpath not contained in url.
	The result is always lowercased.

	>>> strip_url('http://mysite.com/p/some_path', 'http://mysite.com/p/some_path/my_cool_file.html')
	'/my_cool_file.html'
	"""
	h = href.lower()
	# Server-relative href ('/host/path/...'): compare against url with the
	# scheme prefix (first 6 chars, 'http:/') dropped.
	# NB: startswith('/') instead of h[0]=='/' -- the old form raised
	# IndexError on an empty href.
	if h.startswith('/') and h.startswith(url[6:]):
		h = h[len(url)-6:] # unique-path only
	elif h.startswith(url):
		h = h[len(url):]
	return h


def print_progress(level=1, msg=None, percent=None, file=None):
	"""
	Print preformatted message to file object or stdout.
	If passed logging.logger class as file then 'INFO' logging level used.
	"""
	if not cfg['progress']: return
	try:
		level=int(level)
	except:
		level=1
	s = u"PROGRESS %d: [%s] %s" % (level, percent if percent!=None else u"", msg if msg!=None else u"")

	if file:
		if hasattr(file, 'write'):
			file.write(u"%s\n" % (s,)) # file
			if hasattr(file, 'flush'): file.flush()
		elif hasattr(file, 'info'): file.info(s) # logger
	else:
		print s
		sys.stdout.flush() # for gui readers
