#!/usr/local/bin/python

import os
import re
import sys
import codecs
import sqlite3

from readerconf import *

# Feed list placeholder; not populated in this file — TODO confirm use elsewhere.
feeds = []
# User-defined tag vocabulary, loaded from tags.txt in __main__; content
# words matching an entry are promoted to tags on a post.
pretags = []
# Author-name synonym table (variant -> canonical), loaded from synonyms.txt.
synonyms = {}

# Inverted indexes built by index_extract(): key -> set of post-id strings.
bytags = {}
byauthor = {}
# Declared for symmetry but never populated in this file.
bysource = {}

# post_id -> [title, link, first_author]; post_id is a running counter
# incremented once per indexed extract.
posts = {}
post_id = 0


def index_extract(extract):
	"""Parse one extract file and add it to the in-memory indexes.

	The extract format is five lines: title, author, tags, link, summary.
	Increments the module-level `post_id` counter, records the post in
	`posts`, and updates the `bytags` and `byauthor` inverted indexes.
	"""
	global post_id, posts, bytags, byauthor, bysource

	ef = codecs.open(extract, encoding='utf-8')
	try:
		# Line 1: title — collapse runs of whitespace to single spaces.
		title = ef.readline().strip()
		title = re.sub(r'\s+', ' ', title)

		# Line 2: author; empty names and bare IP addresses are anonymous.
		author = ef.readline().strip()
		authors = []
		if len(author) == 0:
			authors.append('ANONYMOUS')
		elif re.match(r'\d+\.\d+\.\d+\.\d+$', author):	# IP address
			authors.append('ANONYMOUS')
		else:
			# Wipe out any e-mail address, then normalise the name:
			# strip whitespace and (,),-,_ punctuation, lowercase.
			author = re.sub(r'\w+?@\w+?\.(com|org|net|edu|gov|mil|ac|co)(\w\w)?', '', author)
			author = re.sub(r'\s+', '', author)
			author = re.sub(r'\(|\)|-|_', '', author)
			author = author.lower()

			# Map author tokens through the synonym table.
			# NOTE(review): all whitespace was removed above, so split()
			# yields at most one token here — confirm this is intended.
			for _author in author.split():
				if _author in synonyms:
					_author = synonyms[_author]
				authors.append(_author)

		# Line 3: explicit tags (whitespace-separated).
		tags = ef.readline().split()

		# Line 4: link.
		link = ef.readline().strip()

		# Line 5: summary; title + summary form the searchable content.
		summary = ef.readline().strip()
		content = title + summary
	finally:
		# Always release the handle, even if a line fails to decode.
		ef.close()

	post_id += 1
	post_idstr = str(post_id)
	posts[post_id] = [title, link, authors[0]]

	# Promote known vocabulary words found in the content to tags;
	# posts with no tag at all fall into the 'OTHER' bucket.
	for token in content.split():
		if re.match(r'\w+$', token):
			token = str(token).lower()
			if token in pretags:
				tags.append(token)
	if len(tags) == 0:
		tags.append('OTHER')
	for tag in tags:
		bytags.setdefault(tag, set()).add(post_idstr)

	# By author.
	for _author in authors:
		byauthor.setdefault(_author, set()).add(post_idstr)
	


if __name__ == '__main__':
	#Get user defined tags
	for line in open('/var/www/vhosts/reader/tags.txt').readlines():
		pretags.append(line.strip('\n'))

	#Get author synonyms
	for line in codecs.open('/var/www/vhosts/reader/synonyms.txt', encoding='utf-8').readlines():
		line = line.strip('\n')
		cols = line.split('\t')
		synonyms[cols[0]] = cols[-1]
	print synonyms

	#Index each extract
	for extract in os.listdir(EXTRACTS_PATH):
		if extract.endswith('.txt'):
			print 'Indexing %s ...' % extract
			index_extract(os.path.join(EXTRACTS_PATH, extract))
			print 'Done'
			
	#ByTags index
	bytags_f = codecs.open('/var/www/vhosts/reader/bytags.txt', 'w', encoding='utf-8')
	for tag in bytags.keys():
		bytags_f.write('%s\t%s\n' % (tag, ' '.join(bytags[tag])))
	bytags_f.close()

	#ByAuthor index
	byauthor_f = codecs.open('/var/www/vhosts/reader/byauthor.txt', 'w', encoding='utf-8')
	for author in byauthor.keys():
		byauthor_f.write('%s\t%s\n' % (author, ' '.join(byauthor[author])))
	byauthor_f.close()
	
	posts_f = codecs.open('/var/www/vhosts/reader/posts.txt', 'w', encoding='utf-8')
	for post_id in posts.keys():
		posts_f.write('%s\t%s\t%s\t%s\n' % (post_id, posts[post_id][0], posts[post_id][1], posts[post_id][2]))
	posts_f.close()

	print 'Indexing complete.'
