"""
all functions assume UTF-8 encoding and unicode strings
"""

import codecs
import locale
import types
from nltk_lite.probability import FreqDist, DictionaryConditionalProbDist, DictionaryProbDist
# Configure the process locale for Greek text: character classification
# (LC_CTYPE, affects e.g. \w in locale-aware contexts) and string collation
# (LC_COLLATE, affects sort order).
# NOTE(review): the locale name 'greek' is a Windows-style alias; on POSIX
# systems this is usually spelled e.g. 'el_GR.UTF-8' — confirm target platform.
locale.setlocale( locale.LC_CTYPE , 'greek') 
locale.setlocale( locale.LC_COLLATE  , 'greek')


def cpd_from_dict(dict, normalize = False):
    """Build a DictionaryConditionalProbDist from a flat dictionary keyed
    by (condition, sample) tuples.

    dict -- mapping from (prev, next) tuples to numeric values.
            (NOTE: the parameter shadows the builtin ``dict``; the name is
            kept to preserve the keyword-call interface.)
    normalize -- passed through to each DictionaryProbDist.

    Returns a DictionaryConditionalProbDist with one DictionaryProbDist
    per distinct condition (the first element of each key tuple).
    """
    conditions = {}

    for key in dict:
        # every key must be a (condition, sample) pair
        # (fix: isinstance replaces the Py2-only `type(key)==types.TupleType`)
        assert isinstance(key, tuple)
        (prev, next) = key
        # initialize the inner dictionary for this condition as needed
        conditions[prev] = conditions.get(prev, {})
        conditions[prev][next] = dict[key]

    for condition in conditions:
        # replace the plain dictionaries with DictionaryProbDists
        conditions[condition] = DictionaryProbDist(conditions[condition], normalize = normalize)

    return DictionaryConditionalProbDist(conditions)
        
        
        
    
    

def cpd(array, conditions, observations):
    """Build a DictionaryConditionalProbDist, pairing each row of `array`
    with the corresponding condition; each row is turned into a
    DictionaryProbDist over `observations` via pd()."""
    per_condition = dict(
        (cond, pd(row, observations))
        for row, cond in zip(array, conditions))
    return DictionaryConditionalProbDist(per_condition)



def pd(values, observations):
    """Build a DictionaryProbDist mapping each observation to its value
    (pairs values and observations positionally)."""
    return DictionaryProbDist(dict(zip(observations, values)))


def print_words(words, filename='out.txt'):
    """Write a frequency distribution to `filename` as UTF-8 text.

    Output starts with a UTF-8 BOM, then one line per word in the form
    ``word-count-(freq)``.  Prints a one-line summary to stdout.

    words -- FreqDist-like object (needs sorted_samples, count, freq, N, B)
    filename -- output path (default 'out.txt')
    """
    # Bug fix: the original said `file.flush` / `file.close` without the
    # call parentheses, so the file was never flushed or closed; `with`
    # guarantees it.  Binary mode so the BOM and UTF-8 bytes go out verbatim.
    with open(filename, mode='wb') as out:
        out.write(codecs.BOM_UTF8)
        for w in words.sorted_samples():
            # same field layout as before: word-count-(freq)
            line = u'%s-%s-(%s)\n' % (w, words.count(w), words.freq(w))
            out.write(line.encode('utf-8'))
    print(' Total %d words, %d unique' % (words.N(), words.B()))

import re
def read_words(filename, words=None):
    """Read a word-frequency file (as written by print_words) into `words`.

    filename -- UTF-8 text file, optionally starting with a BOM
    words -- FreqDist to accumulate into; a fresh one is created if omitted

    Returns the populated FreqDist.
    """
    # Bug fix: the original default `words=FreqDist()` was evaluated once at
    # definition time and shared by every call, so repeated calls silently
    # accumulated counts into the same distribution.
    if words is None:
        words = FreqDist()
    with open(filename, 'rb') as f:
        contents = f.read()
    if contents.startswith(codecs.BOM_UTF8):
        contents = contents[len(codecs.BOM_UTF8):]  # strip BOM
    contents = contents.decode('utf-8')
    return _read_words(contents, words)

def _read_words(contents, words):
    pattern = re.compile(r'(\w+)-(\d+)-(.+)', re.UNICODE)
    #print repr(contents)
    for line in contents.splitlines():
        #print repr(line)
        #print repr(line.split('-'))
        m = pattern.match(line)
        word = m.group(1)
        count = m.group(2)
        #(word, count, freq) =line.split('-')
        words.inc(word, int(count))
    print ' Total %d words, %d unique' % (words.N(), words.B())
    return words

def refineWords(words):
    """Return a new FreqDist containing only the all-Greek samples of
    `words`, with their original counts."""
    greek_only = FreqDist()
    for sample in words.samples():
        if not isGreek(sample):
            continue
        greek_only.inc(sample, words.count(sample))
    return greek_only

def isGreek(word):
    """True when every character of `word` lies in the Greek and Coptic
    (U+0370..U+03FF) or Greek Extended (U+1F00..U+1FFF) Unicode blocks.
    An empty word counts as Greek, matching the original length test."""
    def _is_greek_char(ch):
        code = ord(ch)
        return (0x0370 <= code <= 0x03ff) or (0x1f00 <= code <= 0x1fff)
    return all(_is_greek_char(ch) for ch in word)
        

import os, fnmatch
def all_files(root, patterns='*', single_level=False, yield_folders=False):
    """Yield paths under `root` whose names match any of the patterns.

    patterns -- semicolon-separated shell patterns, e.g. '*.txt;*.py'
    single_level -- when true, do not descend into subdirectories
    yield_folders -- when true, match directory names as well as file names
    """
    wanted = patterns.split(';')
    for dirpath, dirnames, filenames in os.walk(root):
        candidates = list(filenames)
        if yield_folders:
            candidates.extend(dirnames)
        for name in sorted(candidates):
            # yield each name at most once, even if several patterns match
            if any(fnmatch.fnmatch(name, pat) for pat in wanted):
                yield os.path.join(dirpath, name)
        if single_level:
            break

        
