#!/usr/local/bin/python

import os
import re
import sys
import codecs
import feedparser

from readerconf import *;
import cburglish

# NOTE(review): feeds, tags, synonyms, posts and post_id are never read or
# written in this file -- presumably shared state for a companion module;
# confirm before removing.
feeds = []
tags = []
synonyms = {}

posts = {}
post_id = 0
# Counter used to number the extractN.txt files written by extract_entry().
extract_id = 0

# Encoding identifiers passed to the cburglish conversion routines.
ZAWGYI = 0
UNICODE = 1

def _zawgyi_to_unicode(text):
	"""Return *text* converted from Zawgyi to normalized Unicode 5.1.

	Text that cburglish does not detect as Zawgyi is returned unchanged.
	"""
	if cburglish.detect(text) == ZAWGYI:
		text = cburglish.strip(text, ZAWGYI)
		text = cburglish.normalize(text, ZAWGYI)
		text = cburglish.zawgyi2unicode(text)
		# Pass UNICODE explicitly so every field is reordered the same way
		# (the original author path omitted it, unlike title/summary).
		text = cburglish.reorder(text, UNICODE)
	return text


def extract_entry(entry):
	"""Write one numbered extract file for a feedparser entry.

	The file (EXTRACTS_PATH/extract<N>.txt) holds five newline-separated
	fields: title, author, tags, link, summary.  Burmese text detected as
	Zawgyi is converted to Unicode 5.1; missing fields become ''.
	"""
	global extract_id

	# Collapse internal runs of whitespace in the title to single spaces.
	title = _zawgyi_to_unicode(re.sub(r'\s+', ' ', entry['title'].strip()))

	# 'in' replaces the deprecated dict.has_key(); feedparser entries are
	# dict subclasses, so membership tests work the same way.
	author = _zawgyi_to_unicode(entry['author'].strip()) if 'author' in entry else ''

	# Tag extraction is not implemented yet; keep the slot in the file format.
	tags = ''

	link = entry['link'].strip() if 'link' in entry else ''

	# TODO: parse the HTML inside the summary in later versions.
	summary = _zawgyi_to_unicode(entry['summary'].strip()) if 'summary' in entry else ''

	extract_id += 1
	ef = codecs.open(os.path.join(EXTRACTS_PATH, 'extract%d.txt' % extract_id), 'w', encoding='utf-8')
	try:
		ef.write('%s\n%s\n%s\n%s\n%s\n' % (title, author, tags, link, summary))
	finally:
		# Close the handle even if write() raises, so the fd is not leaked.
		ef.close()
		

if __name__ == '__main__':
	#Extract data from each feed
	for feed in os.listdir(FEEDS_PATH):
		if feed.endswith('.xml'):
			print 'Extracting %s ...' % feed,
			data = feedparser.parse(os.path.join(FEEDS_PATH, feed) )
			for entry in data['entries']:
				extract_entry(entry)
			print 'Done'
			
	print 'Extraction complete.'
