'''
Created on Sep 1, 2011

@author: rarakar
'''

import nltk, re, sys
from nltk.util import ingrams
# Mapping from ASJP symbol (or modifier-merged cluster) to its integer code;
# grown incrementally by processASJPWord.
alphabet={}
# Language group -> keyword string; populated by readEthnDatesfile.
ethndatedict = nltk.defaultdict()
# Concept id -> concept gloss, filled from the word-list file headers.
concepts = nltk.defaultdict()
# Column index of the extinction date in a split language metadata line.
EXTINCT_INDEX = 3
# Offsets used by merge_1 when collapsing modifier symbols into one token.
MODIFIER_1 = 1
MODIFIER_2 = 2
MODIFIER_3 = 3
# When True, processASJPWord merges modifier symbols (", *, ~, $) with the
# preceding character(s) instead of dropping them.
USEMODIFIER = True
# NOTE(review): duplicate of the ethndatedict assignment above -- redundant.
ethndatedict = nltk.defaultdict()
# Placeholder for a missing word; rebound by the reader functions to the
# numeric code of 'X' once that code is known.
XXX = ['X', 'X', 'X']
import globalvars
# Next unused integer code for the alphabet mapping.
codenumber = 0
# Maximum n-gram order extracted by makeNgramModels_1.
nGramOrder = 5
# Per-language aggregates filled in by the functions below:
# total word length, ISO codes, attestation counts, member languages, population.
ethnwrdlendict = nltk.defaultdict(float)
ethnisodict = nltk.defaultdict(list)
ethnwrdattstdict = nltk.defaultdict(int)
ethnlangdict = nltk.defaultdict(list)
ethnlangpopdict = nltk.defaultdict(int)

def getAttestation_1(ls):
	"""Count, per language, how many word entries are attested (i.e. not
	the XXX placeholder) and record the counts in the module-level
	ethnwrdattstdict, which is also returned."""
	for lang, conceptmap in ls.iteritems():
		attested = sum(1 for wordlist in conceptmap.itervalues()
		               for seq in wordlist if seq != XXX)
		ethnwrdattstdict[lang] = attested
	return ethnwrdattstdict

def getmeanWrdLength(ls):
	"""Accumulate the total encoded word length per language into the
	module-level ethnwrdlendict.

	Despite the name, no division happens here: callers divide the stored
	totals by the attestation counts to obtain the mean word length.
	"""
	for lang in ls.iterkeys():
		for wordlist in ls[lang].itervalues():
			for seq in wordlist:
				if seq == XXX:
					continue  # skip the missing-word placeholder
				ethnwrdlendict[lang] += len(seq)

def getNettleStats_1(ls):
	"""Write per-language attestation counts to artificial-stats.txt and
	return a ConditionalFreqDist mapping each language to the distribution
	of its n-gram orders."""
	import codecs
#	fout = codecs.open("Asjp-Stats-all13.txt","w","utf-8")
	fout = codecs.open("artificial-stats.txt","w","utf-8")
	fout.write("Language\tAttestation\n")
#	isodict = invertDict(ethnisodict)
	# Populates the module-level ethnwrdattstdict as a side effect.
	getAttestation_1(ls)
#	getmeanWrdLength(ls)
	cpd = freqmodels_1(ls)
	# Samples are '-'-joined decoded n-grams, so the number of '-' separated
	# parts is the n-gram order.
	cfd = nltk.ConditionalFreqDist((langs,len(sample.split("-"))) for langs in cpd.conditions() for sample in cpd.__getitem__(langs).samples())
	for lang in ls.iterkeys():
#		meanlen = ethnwrdlendict[lang]/ethnwrdattstdict[lang]
#		fout.write(" ".join(iso)+"\t"+lang+"\t"+str(meanlen)+"\t"+str(ethnlangpopdict[lang])+"\t"+str(ethnwrdattstdict[lang])+"\t"+ethnfamdict[lang]+"\t")
		fout.write(lang+"\t"+str(ethnwrdattstdict[lang])+"\n")
	fout.close()
	return cfd

def getNettleStats(ls):
	"""Return the mean word length, language name, iso name, 
	n-gram diversity"""
	import codecs
#	fout = codecs.open("Asjp-Stats-all13.txt","w","utf-8")
	fout = codecs.open("artificial-stats.txt","w","utf-8")
	fout.write("ISO\tLanguage\tMWL\tPopulation\tAttestation\tFamily\tMSR\n")
	isodict = invertDict(ethnisodict)
	# Populate the module-level attestation and word-length accumulators.
	getAttestation_1(ls)
	getmeanWrdLength(ls)
	cpd = freqmodels_1(ls)
	# Samples are '-'-joined decoded n-grams; number of parts == n-gram order.
	cfd = nltk.ConditionalFreqDist((langs,len(sample.split("-"))) for langs in cpd.conditions() for sample in cpd.__getitem__(langs).samples())
	for lang, iso in isodict.iteritems():
		# Mean word length = accumulated total length / attested word count.
		meanlen = ethnwrdlendict[lang]/ethnwrdattstdict[lang]
		# NOTE(review): ethnfamdict is not defined anywhere in this module;
		# this line raises NameError unless it is supplied externally.
		fout.write(" ".join(iso)+"\t"+lang+"\t"+str(meanlen)+"\t"+str(ethnlangpopdict[lang])+"\t"+str(ethnwrdattstdict[lang])+"\t"+ethnfamdict[lang]+"\t")
		fout.write(str(cfd[lang][1])+"\n")
	fout.close()
	return cfd


def processASJPWord(word):
	"""Processes a ASJP word and converts it into a code string list"""
	word = word.rstrip(" ")
	word = word.lstrip(" ")
	codeword = []
	code = 0
	global codenumber
	if USEMODIFIER is False:
		for ch in word:
			if ch != "~" and ch != "$":
				if alphabet.has_key(ch):
					code = alphabet[ch]
				else:
					code = codenumber
					codenumber += 1
					alphabet[ch] = code
				codeword.append(code)			
	elif USEMODIFIER is True:
		wrdlist = merge_1(word)
		for w in wrdlist:
			if w == '':
				print word, wrdlist
				sys.exit()
			if w in alphabet:
				code = alphabet[w]
			else:
				code = codenumber
				codenumber += 1
				alphabet[w] = code
			codeword.append(code)		
#	print word, codeword
	return codeword

def merge_1(s):
	"""Collapse ASJP modifier symbols into single multi-character tokens.

	'"' (rewritten to '%') and '*' (rewritten to '&') are merged with the
	1 preceding character; '~' merges the 2 preceding characters; '$'
	merges the 3 preceding characters.  Returns the word as a list of
	tokens (single characters and merged clusters).

	NOTE(review): each branch locates the FIRST remaining occurrence of
	the modifier with sl.index() while iterating over the original string
	s; this works because merged clusters no longer equal the bare
	modifier, but a modifier appearing before enough host characters
	(e.g. at the start of the word) would produce negative slice indices
	-- confirm inputs are well-formed before relying on this.
	"""
	s=s.replace('\"','%')
#	print s
	s=s.replace('*','&')
#	print s
	sl=list(s)
	for ch in s:
		if ch == '%':
			idx = sl.index('%')
			sl.insert(idx+MODIFIER_1,"".join(sl[idx-MODIFIER_1:idx+MODIFIER_1]))
			del(sl[idx-MODIFIER_1:idx+MODIFIER_1])
		elif ch == '&':
			idx = sl.index('&')
			sl.insert(idx+MODIFIER_1,"".join(sl[idx-MODIFIER_1:idx+MODIFIER_1]))
			del(sl[idx-MODIFIER_1:idx+MODIFIER_1])
		elif ch == '~':
			idx = sl.index('~')
			sl.insert(idx+MODIFIER_1,"".join(sl[idx-MODIFIER_2:idx+MODIFIER_1]))
			del(sl[idx-MODIFIER_2:idx+MODIFIER_1])
		elif ch == '$':
			idx = sl.index('$')
			sl.insert(idx+1,"".join(sl[idx-MODIFIER_3:idx+MODIFIER_1]))
			del(sl[idx-MODIFIER_3:idx+MODIFIER_1])
	return sl

def cleanRow(row):
	"""Normalise a raw data row: drop the line ending, turn tabs into
	spaces, squeeze runs of spaces down to one, and trim both ends."""
	row = row.strip("\r\n").replace("\t", " ")
	while "  " in row:
		row = row.replace("  ", " ")
	return row.strip(" ")

def readASJPSpecified(f, search_key="ARTIFICIAL"):
	"""Read an ASJP word-list file, keeping only languages whose detail
	line contains search_key.

	Returns (lists, ethnisodict) where lists maps
	language name -> concept key -> list of encoded word variants.
	Mutates module-level state: concepts, alphabet (via processASJPWord),
	ethnlangdict, XXX and globalvars.revalphabet.
	"""
	lists = {}
	ethn = ""
	lang = "xxx"
	extinct_flag = False
	#Discard the first two lines
	f.readline()
	f.readline()
	#the first loop reads concept words
	for row in f:
		items = row.split()
		if not items:
			break
		concepts[items[0]] = items[1]
	#the second loops reads and discards the phonemes
	for row in f:
		if not row.split():
			break
	#the third loop reads language data
	for row in f:		
		if not row.split():
			continue
		if row[0].isupper():
			# Language header line of the form "NAME{family|ethn,...}".
			row = cleanRow(row)
			index = row.find("{")
			lang_details = row
			lang = row[0:index]
			family = row[index+1:].split('|')
			index2 = family[1].find("}")
#			print family, index2
			ethn = family[1][0:index2].split(",")
#			ethn = family[1].replace("}","").split(",")
#			ethn = family[1].replace("}\r\n","").split(",")
			extinct_flag = False
#			if re.search("ARTIFICIAL|Cre|Mixed|Ain", family[0]):
#			if re.search("Cre|Mixed|Ain", family[0]):
#				extinct_flag = True
#			if re.search("Creole|isolate", family[1]):
#				extinct_flag = True
#			if ethn[0] == '':
#				extinct_flag = True
		elif row.startswith(" ") and extinct_flag == False:
			# Metadata line: fixed-column extinction date, WALS and ISO codes.
#			print row
			extinct_age = 0
			line = row[1:]
			line = line.split()
			extinct = row[18:30]
			iso = row[39:42]
			wals = row[33:36]
			wals = wals.replace(" ","")
			iso = iso.replace(" ","")
			# Keep only languages whose header matched search_key; all
			# others are flagged so their word lines are skipped.
			if lang_details.find(search_key) > -1:
				ethnlangdict[search_key].append(lang)
			else:
				extinct_flag = True
#			print iso
#			if iso in ethndeliso:
#				extinct_flag = True
#			if walsisodict.has_key(wals) or len(wals) == 0:
#				extinct_flag = True
#			print extinct
#			extinct = extinct.lstrip()
#			if extinct == '':
#				extinct_age = 0
#			else:
#				extinct_age = int(extinct) 
#			if extinct_age >= -1701 and extinct_age < -1:
#				extinct_flag = True
#				continue			
#			if len(line) >= 4:
##				print lang, line[EXTINCT_INDEX]
#				if int(line[EXTINCT_INDEX]) >= -1701 and int(line[EXTINCT_INDEX]) < -1:
#					extinct_flag = True 
#					print lang," is extinct"
#					continue
#			for langgrp in ethndatedict.keys():
#				keywords = ethndatedict[langgrp].split(";")
#				for keyword in keywords:
#					if lang_details.find(keyword) > -1:
#						ethnlangdict[langgrp].append(lang)
#						if ethnisodict.has_key(langgrp):
#							if iso not in ethnisodict[langgrp]:
#								ethnisodict[langgrp].append(iso)
#						else:
#							ethnisodict[langgrp].append(iso)
#						print langgrp, lang
#			print extinct_flag
		elif row[0].isdigit() and extinct_flag is False:
			# Word line: "NN concept<TAB>words"; normalise separators and
			# discard anything after a '/' (alternate transcriptions).
			row = row.replace(", ",",")
			row = row.replace("//"," //")
			row = row.replace("(incl.)","")
			row = re.sub("\t{2,}", "\t", row)
			k = row.find("/")
			if k > -1:
				row = row[:k]
			rows = row.split("\t")
			items = rows[0].split()
			words = rows[1].split()
			# Concept key, e.g. "012." -> "12".
			key = items[0].lstrip("0")
			key = key.rstrip(".")
			value = items[1:]  # unused; kept for parity with the raw format
			if key in concepts:
				# Comma-separated synonym variants for this concept.
				alt = words[0].split(",")			
				altlist = []
				for word in alt:
					# Variants containing '%' are skipped entirely
					# (presumably specially marked forms -- TODO confirm).
					if word.find('%') == -1 and len(word) > 0:
						altlist.append(processASJPWord(word))
#				altlist.append(processASJPWord(word))
				if lang not in lists: lists[lang] = {}
				# NOTE(review): the empty dict created on the next line is
				# immediately replaced by altlist below.
				if key not in lists[lang]: lists[lang][key] = {}
				lists[lang][key] = altlist
	if 'X' in alphabet:
		global XXX
		# Rebind the missing-word placeholder to the numeric code of 'X'.
		# NOTE(review): codenumber here is a function local shadowing the
		# module-level counter; the global counter is not modified.
		codenumber = alphabet['X']
		XXX = [codenumber, codenumber, codenumber]
#	print XXX
#	print "alphabet---", alphabet
#	print "\n".join(map(str,alphabet.items()))
	# Publish the reverse mapping (code -> symbol) for decodeTuple.
	globalvars.revalphabet = invertDict(alphabet)
	globalvars.revalphabet['_'] = '_'
#	revalphabet = revalphabet
#	print "revalphabet---", globalvars.revalphabet
#	return (lists, ethnlangdict)
#	print ethnisodict
#	print lists
	return (lists, ethnisodict)


def readASJPdatesfile(f):
	"""Read the ASJP Dates file for a specified Calibration points file where
	the search key for each language gives the list.

	Filters out artificial/creole/mixed languages, isolates, and languages
	extinct between 1701 BCE and the present; groups the remainder via
	the keywords in the module-level ethndatedict (set by
	readEthnDatesfile).  Returns (lists, ethnisodict) where lists maps
	language name -> concept key -> list of encoded word variants.
	Mutates concepts, alphabet, ethnlangdict, ethnisodict, XXX and
	globalvars.revalphabet.
	"""
	lists = {}
	ethn = ""
	lang = "xxx"
	extinct_flag = False
	#Discard the first two lines
	f.readline()
	f.readline()
	#the first loop reads concept words
	for row in f:
		items = row.split()
		if not items:
			break
		concepts[items[0]] = items[1]
	#the second loops reads and discards the phonemes
	for row in f:
		if not row.split():
			break
	#the third loop reads language data
	for row in f:		
		if not row.split():
			continue
		if row[0].isupper():
			# Language header line of the form "NAME{family|ethn,...}".
			row = cleanRow(row)
			index = row.find("{")
			lang_details = row
			lang = row[0:index]
			family = row[index+1:].split('|')
			index2 = family[1].find("}")
#			print family, index2
			ethn = family[1][0:index2].split(",")
#			ethn = family[1].replace("}","").split(",")
#			ethn = family[1].replace("}\r\n","").split(",")
			extinct_flag = False
			# Exclude artificial, creole, mixed and Ainu-family entries,
			# plus isolates and entries with no Ethnologue classification.
			if re.search("ARTIFICIAL|Cre|Mixed|Ain", family[0]):
#			if re.search("Cre|Mixed|Ain", family[0]):
				extinct_flag = True
			if re.search("Creole|isolate", family[1]):
				extinct_flag = True
			if ethn[0] == '':
				extinct_flag = True
		elif row.startswith(" ") and extinct_flag == False:
			# Metadata line: fixed-column extinction date, WALS and ISO codes.
#			print row
			extinct_age = 0
			line = row[1:]
			line = line.split()
			extinct = row[18:30]
			iso = row[39:42]
			wals = row[33:36]
			wals = wals.replace(" ","")
			iso = iso.replace(" ","")
#			print iso
#			if iso in ethndeliso:
#				extinct_flag = True
#			if walsisodict.has_key(wals) or len(wals) == 0:
#				extinct_flag = True
#			print extinct
			extinct = extinct.lstrip()
			if extinct == '':
				extinct_age = 0
			else:
				extinct_age = int(extinct) 
			# Skip languages that went extinct between 1701 BCE and now.
			if extinct_age >= -1701 and extinct_age < -1:
				extinct_flag = True
				continue			
#			if len(line) >= 4:
##				print lang, line[EXTINCT_INDEX]
#				if int(line[EXTINCT_INDEX]) >= -1701 and int(line[EXTINCT_INDEX]) < -1:
#					extinct_flag = True 
#					print lang," is extinct"
#					continue
			# Assign the language to every calibration group whose
			# ';'-separated keywords occur in the header line.
			for langgrp in ethndatedict.keys():
				keywords = ethndatedict[langgrp].split(";")
				for keyword in keywords:
					if lang_details.find(keyword) > -1:
						ethnlangdict[langgrp].append(lang)
						if ethnisodict.has_key(langgrp):
							if iso not in ethnisodict[langgrp]:
								ethnisodict[langgrp].append(iso)
						else:
							ethnisodict[langgrp].append(iso)
#						print langgrp, lang
#			print extinct_flag
		elif row[0].isdigit() and extinct_flag is False:
			# Word line: "NN concept<TAB>words"; normalise separators and
			# discard anything after a '/' (alternate transcriptions).
			row = row.replace(", ",",")
			row = row.replace("//"," //")
			row = row.replace("(incl.)","")
			row = re.sub("\t{2,}", "\t", row)
			k = row.find("/")
			if k > -1:
				row = row[:k]
			rows = row.split("\t")
			items = rows[0].split()
			words = rows[1].split()
			# Concept key, e.g. "012." -> "12".
			key = items[0].lstrip("0")
			key = key.rstrip(".")
			value = items[1:]  # unused; kept for parity with the raw format
			if key in concepts:
				alt = words[0].split(",")			
				altlist = []
				for word in alt:
					# Variants containing '%' are skipped entirely
					# (presumably specially marked forms -- TODO confirm).
					if word.find('%') == -1 and len(word) > 0:
						altlist.append(processASJPWord(word))
#				altlist.append(processASJPWord(word))
				if lang not in lists: lists[lang] = {}
				# NOTE(review): the empty dict created on the next line is
				# immediately replaced by altlist below.
				if key not in lists[lang]: lists[lang][key] = {}
				lists[lang][key] = altlist
	if 'X' in alphabet:
		global XXX
		# Rebind the missing-word placeholder to the numeric code of 'X'.
		# NOTE(review): codenumber here is a function local shadowing the
		# module-level counter; the global counter is not modified.
		codenumber = alphabet['X']
		XXX = [codenumber, codenumber, codenumber]
#	print XXX
#	print "alphabet---", alphabet
#	print "\n".join(map(str,alphabet.items()))
	# Publish the reverse mapping (code -> symbol) for decodeTuple.
	globalvars.revalphabet = invertDict(alphabet)
	globalvars.revalphabet['_'] = '_'
#	revalphabet = revalphabet
#	print "revalphabet---", globalvars.revalphabet
#	return (lists, ethnlangdict)
	print ethnisodict
	return (lists, ethnisodict)

def readEthnDatesfile(f):
	"""Read a calibration-points file (e.g. CalibrationPoints.txt).

	Each tab-separated data line maps a language group (column 0) to its
	search keyword string (column 1).  The pairs are stored in the
	module-level ethndatedict, which is also returned.
	"""
	f.readline()  # skip the header row
	for rawline in f:
		fields = rawline.rstrip('\n').split('\t')
#		print fields
		group, keyword = fields[0], fields[1]
#		calibrdate = fields[2]
		ethndatedict[group] = keyword
	return ethndatedict

def invertDict(a):
	"""Invert a mapping: return an nltk.Index grouping keys by value,
	i.e. value -> list of keys that mapped to it."""
	return nltk.Index((v,k) for k,v in a.items())

def makeNgramModels_1(wordlist):
	"""Extract all character n-grams (orders 1..nGramOrder, no padding)
	from every attested word in wordlist, each decoded back into a
	'-'-joined alphabet string via decodeTuple."""
	grams = []
	for seq in wordlist:
		if seq == XXX:
			continue  # skip the missing-word placeholder
		for order in range(1, nGramOrder + 1):
			for gram in ingrams(seq, order, pad_left=False, pad_right=False, pad_symbol='_'):
				grams.append(decodeTuple(gram))
	return grams

def freqmodels_1(ls):
	"""Builds the frequency models for the ls matrix

	Returns an nltk.ConditionalFreqDist mapping each language to the
	frequencies of the decoded n-grams of all its concepts' words.
	"""
	cfd = nltk.ConditionalFreqDist((ka, seq) for ka, va in ls.iteritems() for value, wordlist in va.iteritems() for seq in makeNgramModels_1(wordlist))
	return cfd

def decodeTuple(tup):
	"""Decodes a tuple from code numbers into a alphabet tuple
	"""
#	print revalphabet #using global revalphabet
	if type(tup) is tuple:
		stra = "-".join([k for a in tup for k in globalvars.revalphabet[a]])
	elif type(tup) is int:
		print "stra--int--", tup
		stra = revalphabet[tup]
	return stra

def getMeanGrams(ls):
	"""Print, per language group, the mean number of n-gram types of each
	order 1..5 (sums divided by the group's language count)."""
	cpd = freqmodels_1(ls)
	# Samples are '-'-joined decoded n-grams; number of parts == n-gram order.
	cfd = nltk.ConditionalFreqDist((langs,len(sample.split("-"))) for langs in cpd.conditions() for sample in cpd.__getitem__(langs).samples())
	for grp, langs in ethnlangdict.items():
		sum_1 = 0.0
		sum_2 = 0.0
		sum_3 = 0.0
		sum_4 = 0.0
		sum_5 = 0.0
		# NOTE(review): this sums over ALL languages in ls for every group,
		# yet divides by len(langs) below -- if a per-group mean was
		# intended, this should probably read `for lang in langs:`.
		for lang in ls.iterkeys():
			sum_1 += cfd[lang][1]
			sum_2 += cfd[lang][2]
			sum_3 += cfd[lang][3]
			sum_4 += cfd[lang][4]
			sum_5 += cfd[lang][5]
			print lang, cfd[lang][1], cfd[lang][2], cfd[lang][3], cfd[lang][4], cfd[lang][5]
		print grp, len(langs), sum_1/len(langs), sum_2/len(langs), sum_3/len(langs), sum_4/len(langs), sum_5/len(langs)
#		print grp, sum, len(langs), sum*1.0/len(langs)

def getFamStats(ls):
	"""Get the n-gram diversity with modifier on

	Writes one row per language group (groups with at least 2 languages)
	giving the count of n-gram types of each order 1..nGramOrder.
	"""
#	fout = open("/home/rarakar/work/ASJP-Dates/src/Ethnologue-full-stats13-1.txt","w")
	fout = open("/home/rarakar/work/ASJP-Dates/src/Family-stats13-artificial.txt","w")
	cfd = nltk.ConditionalFreqDist()
	print "Ngram Order", nGramOrder
	for grp, langs in ethnlangdict.iteritems():
		# Singleton groups carry no diversity information; skip them.
		if len(langs) < 2:
			continue
#		print grp, langs
		for lang in langs:
			for concept, wordlist in ls[lang].iteritems():
				for seq in makeNgramModels_1(wordlist):
					# .inc() is the old NLTK FreqDist increment API.
					cfd[grp].inc(seq)
#	cfd.tabulate()
	# Re-bucket each group's n-grams by order (number of '-' parts).
	cpd = nltk.ConditionalFreqDist((grp,len(sample.split("-"))) for grp in cfd.conditions() for sample in cfd.__getitem__(grp).samples())
#	cpd.tabulate()
#	print cpd['Brythonic'][1]
	fout.write("Language group\tNo of languages\t"+"\t".join(map(str, range(1,nGramOrder+1)))+"\n")
	for fam in sorted(ethnlangdict.iterkeys()):
		if len(ethnlangdict[fam]) < 2:
			continue
		fout.write(fam+"\t"+str(len(ethnisodict[fam]))+"\t")
		a = []
		for i in range(1,nGramOrder+1):
			a.append(str(cpd[fam][i]))
		fout.write("\t".join(a)+"\n")
	fout.close()
	return

def fileToDM(f):
	"""Read an ASJP file into a language -> concept -> encoded-words dict.

	Simpler sibling of readASJPSpecified: no extinction filtering and no
	metadata-line parsing.  Mutates the module-level concepts, alphabet
	and XXX.  Returns the nested dict of encoded word lists.
	"""
	lists = {}
	lang = "xxx"
	#Discard the first two lines
	f.readline()
	f.readline()
	#the first loop reads concept words
	for row in f:
		items = row.split()
		if not items:
			break
		concepts[items[0]] = items[1]
	#the second loops reads and discards the phonemes
	for row in f:
		if not row.split():
			break
	#the third loop reads language data
	for row in f:
#		print row
		if row[0].isupper():
			# Language header line "NAME{...}".
			index = row.find("{")
			lang = row[0:index]
#			print lang
		elif row[0].isdigit():
			# Word line: normalise separators, drop anything after '/'.
			row = row.replace(", ",",")
			row = row.replace("//"," //")
			k = row.find("/")
			if k > -1:
				row = row[:k]
			row = row.replace("(incl.)","")
			row = re.sub("\t{2,}", "\t", row)
			row = row.replace(", ",",")
			items = row.split()
			# Concept key, e.g. "012" -> "12".
			key = items[0].lstrip("0")
			value = items[1]  # unused; kept for parity with the raw format
			if key in concepts:
				if len(items) < 3:
					print items
				alt = items[2].split(",")
				altlist = []
				for word in alt:
					# Variants containing '%' are skipped entirely
					# (presumably specially marked forms -- TODO confirm).
					if word.find('%') == -1 and len(word) > 0:
						altlist.append(processASJPWord(word))
				if lang not in lists: lists[lang] = {}
				# NOTE(review): the empty dict created on the next line is
				# immediately replaced by altlist below.
				if key not in lists[lang]: lists[lang][key] = {}
				lists[lang][key] = altlist
	if 'X' in alphabet:
		global XXX
		# Rebind the missing-word placeholder to the numeric code of 'X'.
		# NOTE(review): codenumber here is a function local shadowing the
		# module-level counter; the global counter is not modified.
		codenumber = alphabet['X']
		XXX = [codenumber, codenumber, codenumber]
	print alphabet
#	print "\n".join(alphabet.iterkeys())
	return lists


def readASJPEthn(f):
	"""This program reads ASJP listss13 file and gets the Ethnologue information and
	stores the languages list

	Filters out artificial/creole/mixed languages, isolates, and languages
	extinct between 1701 BCE and the present, then registers each kept
	language under every prefix of its Ethnologue classification path
	(up to 4 levels) in ethnlangdict.  Returns (lists, ethnlangdict)
	where lists maps language -> concept key -> encoded word variants.
	Mutates concepts, alphabet, XXX and globalvars.revalphabet.
	"""
	lists = {}
	ethn = ""
	lang = "xxx"
	extinct_flag = False
	ethn_dates = ""
	#Discard the first two lines
	f.readline()
	f.readline()
	#the first loop reads concept words
	for row in f:
		items = row.split()
		if not items:
			break
		concepts[items[0]] = items[1]
	#the second loops reads and discards the phonemes
	for row in f:
		if not row.split():
			break
	#the third loop reads language data
	for row in f:		
		if not row.split():
			continue
		if row[0].isupper():
			# Language header line of the form "NAME{family|ethn,...}".
			row = cleanRow(row)
			index = row.find("{")
			lang_details = row
			lang = row[0:index]
			family = row[index+1:].split('|')
			index2 = family[1].find("}")
#			print family, index2
			ethn = family[1][0:index2].split(",")			
#			ethn = family[1].replace("}","").split(",")
#			ethn = family[1].replace("}\r\n","").split(",")
			extinct_flag = False
			# Exclude artificial, creole, mixed and Ainu-family entries,
			# plus isolates and entries with no Ethnologue classification.
			if re.search("ARTIFICIAL|Cre|Mixed|Ain", family[0]):
				extinct_flag = True
			if re.search("Creole|isolate", family[1]):
				extinct_flag = True
			if ethn[0] == '':
				extinct_flag = True
			# Keep the top (up to) 4 Ethnologue classification levels.
			ethn_dates = ethn[0:4]
		elif row.startswith(" ") and extinct_flag == False:
			# Metadata line: fixed-column extinction date, WALS and ISO codes.
#			print row
			extinct_age = 0
			line = row[1:]
			line = line.split()
			extinct = row[18:30]
			iso = row[39:42]
			wals = row[33:36]
			wals = wals.replace(" ","")
#			if iso in ethndeliso:
#				extinct_flag = True
#			if walsisodict.has_key(wals) or len(wals) == 0:
#				extinct_flag = True
#			print extinct
			extinct = extinct.lstrip()
			if extinct == '':
				extinct_age = 0
			else:
				extinct_age = int(extinct) 
			# Skip languages that went extinct between 1701 BCE and now.
			if extinct_age >= -1701 and extinct_age < -1:
				extinct_flag = True
				continue
			# Register the language under every prefix of its
			# classification path, e.g. "A", "A,B", "A,B,C", "A,B,C,D".
			for m in range(1,len(ethn_dates)+1):
				ethnlangdict[",".join(ethn_dates[0:m])].append(lang)
#			if len(line) >= 4:
##				print lang, line[EXTINCT_INDEX]
#				if int(line[EXTINCT_INDEX]) >= -1701 and int(line[EXTINCT_INDEX]) < -1:
#					extinct_flag = True 
#					print lang," is extinct"
#					continue
#			for langgrp in ethndatedict.keys():
#				keywords = ethndatedict[langgrp].split(";")
#				for keyword in keywords:
#					if lang_details.find(keyword) > -1:
#						ethnlangdict[langgrp].append(lang)
#						print langgrp, lang
#			print extinct_flag
		elif row[0].isdigit() and extinct_flag is False:
			# Word line: "NN concept<TAB>words"; normalise separators and
			# discard anything after a '/' (alternate transcriptions).
			row = row.replace(", ",",")
			row = row.replace("//"," //")
			row = row.replace("(incl.)","")
			row = re.sub("\t{2,}", "\t", row)
			k = row.find("/")
			if k > -1:
				row = row[:k]
			rows = row.split("\t")
			items = rows[0].split()
			words = rows[1].split()
			# Concept key, e.g. "012." -> "12".
			key = items[0].lstrip("0")
			key = key.rstrip(".")
			value = items[1:]  # unused; kept for parity with the raw format
			if key in concepts:
				alt = words[0].split(",")			
				altlist = []
				for word in alt:
					# Variants containing '%' are skipped entirely
					# (presumably specially marked forms -- TODO confirm).
					if word.find('%') == -1 and len(word) > 0:
						altlist.append(processASJPWord(word))
#				altlist.append(processASJPWord(word))
				if lang not in lists: lists[lang] = {}
				# NOTE(review): the empty dict created on the next line is
				# immediately replaced by altlist below.
				if key not in lists[lang]: lists[lang][key] = {}
				lists[lang][key] = altlist		
	if 'X' in alphabet:
		global XXX
		# Rebind the missing-word placeholder to the numeric code of 'X'.
		# NOTE(review): codenumber here is a function local shadowing the
		# module-level counter; the global counter is not modified.
		codenumber = alphabet['X']
		XXX = [codenumber, codenumber, codenumber]
#	print XXX
#	print "alphabet---", alphabet
#	print "\n".join(map(str,alphabet.items()))
	# Publish the reverse mapping (code -> symbol) for decodeTuple.
	globalvars.revalphabet = invertDict(alphabet)
	globalvars.revalphabet['_'] = '_'
#	revalphabet = revalphabet
#	print "revalphabet---", globalvars.revalphabet
	return (lists, ethnlangdict)