﻿import sys
import libxml2
import Dumper
import re

# Deck configurations for the four export sets.  Each config carries the
# deck title, a split factor (cards per sub-deck), the Quizlet template
# metadata, and the name of the wrapper function (resolved by dump())
# that turns parsed items into cards.

SET_DEFINATIONS = {
    'title': 'Oxford 3000 Definations',
    'split': {'factor': range(50, 51)},
    'template': {
        'description': 'Oxford 3000 Definations',
        'tags': 'Oxford3000, Defination',
        'words_lang': 'en',
        'defs_lang': 'en',
    },
    'wrapper': 'definationWrapper',
}

SET_EXAMPLES = {
    'title': 'Oxford 3000 Examples',
    'split': {'factor': range(50, 51)},
    'template': {
        'description': 'Oxford 3000 Examples',
        'tags': 'Oxford3000, Example',
        'words_lang': 'en',
        'defs_lang': 'en',
    },
    'wrapper': 'exampleWrapper',
}

SET_ETYMOLOGIES = {
    'title': 'Oxford 3000 Etymologies',
    'split': {'factor': range(50, 51)},
    'template': {
        'description': 'Oxford 3000 Etymologies',
        'tags': 'Oxford3000, Etymology',
        'words_lang': 'en',
        'defs_lang': 'en',
    },
    'wrapper': 'etymologyWrapper',
}

SET_WORDS = {
    'title': 'Oxford 3000 Words',
    'split': {'factor': range(50, 51)},
    'template': {
        'description': 'Oxford 3000 Words, Phrases and Idioms',
        'tags': 'Oxford3000, Words',
        'words_lang': 'en',
        'defs_lang': 'la',  # 'la' stands in for IPA transcriptions
    },
    'wrapper': 'wordWrapper',
}


def getPos(core, part, dom):
    """Extract the part-of-speech label for *core*.

    Derivative groups (dr-g) concatenate every z_p descendant's text.
    Other nodes take the last block-g of their part; if nothing was
    found, the headword-level z_p markers are tried as fallbacks.
    """
    pos = ''
    if core.name == 'dr-g':
        # A derivative may spread its POS over several z_p nodes.
        for node in core.xpathEval('.//z_p'):
            pos += node.getContent().strip()
    else:
        # The last block-g wins, matching the original scan order.
        for node in part.xpathEval('.//block-g'):
            pos = node.getContent().strip()
    # Fallbacks: entry-level POS markers.
    if not pos:
        for node in dom.xpathEval('/entry/h-g/top-g//z_p'):
            pos = node.getContent().strip()
    if not pos:
        for node in dom.xpathEval('/entry/h-g/z_p'):
            pos = node.getContent().strip()
    return pos.strip()

def getIPAs(core, part, dom, force):
    """Collect the IPA transcriptions for *core*, first occurrence first.

    Derivative groups (dr-g) use their own ei-g descendants unless
    *force* is set; every other node pulls the headword-level ei-g
    groups plus, for non-h-g parts, the part's own ei-g children.
    Duplicates are dropped while preserving order.
    """
    if core.name == 'dr-g' and not force:
        nodes = core.xpathEval('.//ei-g')
    else:
        nodes = dom.xpathEval('/entry/h-g/top-g/ipa_block/ei-g')
        nodes += dom.xpathEval('/entry/h-g/top-g/core_block/ei-g')
        if part.name != 'h-g':
            nodes += part.xpathEval('./ei-g')
            nodes += part.xpathEval('./label_block/ei-g')
    ipas = []
    for node in nodes:
        # './i|y' selects both British (i) and American (y) variants.
        for child in node.xpathEval('./i|y'):
            text = child.getContent().strip()
            if text:
                ipas.append(text)
    # De-duplicate, keeping each transcription's first occurrence.
    seen = set()
    unique = []
    for text in ipas:
        if text not in seen:
            seen.add(text)
            unique.append(text)
    return unique
# Whitespace-run splitter used by getTerm() to normalise term spacing.
r = re.compile(r'\s+')
def getT(core, root_name, xpath):
    """Collect term strings from the *root_name* group around *core*.

    When *core* or its parent is a *root_name* element, evaluates
    *xpath* on it, pads every embedded <img> with a trailing space so
    adjacent words do not fuse, and returns the stripped text contents.
    Returns an empty list otherwise.
    """
    group = None
    if core.parent.name == root_name:
        group = core.parent
    if core.name == root_name:
        group = core
    terms = []
    if group:
        for node in group.xpathEval(xpath):
            # Keep image placeholders from gluing neighbouring words.
            for img in node.xpathEval('.//img'):
                img.addNextSibling(libxml2.newText(' '))
            terms.append(node.getContent().strip())
    return terms
def getTerm(core):
    """Build a comma-separated display term for idioms (id-g), phrasal
    verbs (pv-g) and derivatives (dr-g) around *core*.

    Returns '' for plain senses that belong to none of those groups.
    """
    parts = []
    parts.extend(getT(core, 'id-g', './id'))
    parts.extend(getT(core, 'pv-g', './pv'))
    parts.extend(getT(core, 'dr-g', './dr|zd'))
    # Collapse internal whitespace runs to single spaces.
    normalized = [' '.join(r.split(t)) for t in parts]
    return ', '.join(normalized)

def getDefination(core):
    """Assemble the definition text for *core*.

    Concatenates, in a fixed order: non-empty label blocks, definition
    blocks, parenthesised dc/u usage notes, cf collocation prefixes
    (suffixed with ':'), then d, ud and xr definition/cross-reference
    text.  Returns the stripped, space-joined result.
    """
    pieces = []
    for node in core.xpathEval('.//label_block'):
        text = node.getContent().strip()
        if text:
            pieces.append(text)
    for node in core.xpathEval('./def_block'):
        pieces.append(node.getContent().strip())
    # dc/u carry usage restrictions -> parenthesised.
    for query in ('./dc', './u'):
        for node in core.xpathEval(query):
            pieces.append('(' + node.getContent().strip() + ')')
    for node in core.xpathEval('./cf'):
        pieces.append(node.getContent().strip() + ':')
    for query in ('./d', './ud', './xr'):
        for node in core.xpathEval(query):
            pieces.append(node.getContent().strip())
    return ' '.join(pieces).strip()

def getExamples(core, word):
    """Collect the example sentences (x-g groups) under *core*.

    Each example is the cf prefix (with '~' placeholders replaced by the
    cleaned headword, suffixed ': ') followed by the sentence text.
    """
    examples = []
    for group in core.xpathEval('./x-g'):
        fragments = []
        for node in group.xpathEval('./cf'):
            fragments.append(node.getContent().strip().replace('~', clearStr(word)) + ': ')
        for node in group.xpathEval('./x'):
            fragments.append(node.getContent().strip())
        examples.append(''.join(fragments))
    return examples

def getWord(dom):
    """Return the headword text of *dom*.

    Takes the content of the last z_core_h node under the last h-g
    group.  Returns '' when the entry has no such node.

    Fix: the original left *word* unbound (NameError at the return)
    whenever no z_core_h node matched; it now degrades to ''.
    """
    word = ''
    for head in dom.xpathEval('/entry/h-g'):
        for w in head.xpathEval('.//z_core_h'):
            word = w.getContent().strip()
    return word

def getSub(name, part):
    """Return the *name* descendants of *part*, with any that contain
    n-g children replaced by those n-g nodes.

    Promoted n-g nodes come first (in reverse scan order, matching the
    original), followed by the remaining plain matches.
    """
    matches = part.xpathEval('.//' + name)
    promoted = []
    # Walk backwards so removing entries does not disturb the scan.
    for idx in range(len(matches) - 1, -1, -1):
        groups = matches[idx].xpathEval('./n-g')
        if groups:
            promoted += groups
            matches.remove(matches[idx])
    return promoted + matches

def getList(part):
    """Enumerate the candidate sense nodes of *part*.

    Order: n-g children, then sense (sd-g), phrasal-verb (pv-g) and
    idiom (id-g) sub-groups, the part itself when none of those
    matched, and finally the core-flagged derivative groups.

    Fix: dropped a dead local (`ll`) that duplicated the final
    getSub('dr-g[@core="y"]', part) call for no effect.
    """
    sub = []
    sub += part.xpathEval('./n-g')
    sub += getSub('sd-g', part)
    sub += getSub('pv-g', part)
    sub += getSub('id-g', part)
    # A part with no sub-structure is its own single sense.
    if len(sub) == 0:
        sub += [part]
    sub += getSub('dr-g[@core="y"]', part)
    return sub

def isCore(core, force=False):
    """Decide whether *core* belongs to the Oxford-3000 core vocabulary.

    Walks up the ancestor chain looking for a core="y" attribute,
    answering False at the structural boundaries (p-g, h-g, entry).

    core  -- libxml2 node to test
    force -- when True, treat the node as core unconditionally

    Fix: the original tested the name 'p=g' — an impossible XML element
    name (never matches) — so the part-of-speech boundary was never
    honoured and the walk could climb past it; corrected to 'p-g'.
    """
    if force: return True
    root = core
    while(True):
        if root.name == 'p-g': return False
        if root.name == 'h-g': return False
        if root.name == 'entry': return False
        # '@core="y"' evaluates to an XPath boolean; True == 1 in Python.
        if root.xpathEval('@core="y"') == 1: return True
        root = root.parent
    return False
    
def getForceCore(l, p, d, part):
    """Heuristics for treating every node in *part* as core.

    l    -- core-flagged node count (minus homonym markers)
    p, d -- counts of core-flagged p-g and dr-g groups
    part -- the p-g/h-g group being processed

    True when the entry carries a single core marker, when all markers
    are accounted for by groups/derivatives plus one, or when the group
    itself is flagged core="y".
    """
    if l == 1 or l == p + d + 1:
        return True
    return bool(part.xpathEval('@core="y"'))

def clearStr(s):
    """Strip pronunciation markup (syllable dots and primary/secondary
    stress marks) from *s* and trim surrounding whitespace."""
    for mark in ('•', 'ˌ', 'ˈ'):
        s = s.replace(mark, '')
    return s.strip()

def getEtymology(part, dom):
    """Return the etymology text for *part*, falling back to the
    entry-level top-g etymology when the part carries none.

    Only the last matching etymology node's text survives.
    NOTE(review): e.replaceNode(h) swaps the <etymology> element for its
    <h> descendant while iterating descendants — presumably to drop the
    heading from the extracted text; confirm against the libxml2 docs.
    """
    etymology = ''
    for e in part.xpathEval('.//etymology'):
        for h in e.xpathEval('.//h'): e.replaceNode(h)
        #for h in e.xpathEval('.//hm'): e.replaceNode(h)
        etymology = e.getContent().strip()
    if etymology == '':
        # Fallback: entry-level etymology.
        for e in dom.xpathEval('/entry/top-g//etymology'):
            for h in e.xpathEval('.//h'): e.replaceNode(h)
            #for h in e.xpathEval('.//hm'): e.replaceNode(h)
            etymology = e.getContent().strip()
    return etymology

def dumpItems(items, out):
    """Write a plain-text debug dump of parsed *items* to *out*.

    Each item dict carries 'word' plus optionally 'pos'/'ipas',
    'etymology', 'defination' and 'examples'.  A dashed separator line
    closes the batch.  Lines use CRLF endings.
    """
    for item in items:
        if item.get('pos') is not None:
            out.write("%s /%s/ %s\r\n" % (item['word'], ','.join(item['ipas']), item['pos']))
        elif item.get('etymology') is not None:
            out.write("%s: %s\r\n" % (item['word'], item['etymology']))
        else:
            # Plain-term items store their display string under 'ipas'.
            out.write("%s\r\n" % item['ipas'])
        if item.get('defination'):
            out.write("%s\r\n" % item['defination'])
        for example in item.get('examples') or []:
            out.write("\t%s\r\n" % example)
        out.write('\r\n')
    out.write('-----------------------------\r\n\r\n')

def getCard(front, back):
    """Wrap a front/back pair as a card dict, decoding the UTF-8 byte
    strings produced by libxml2 into unicode (Python 2)."""
    card = {}
    card['front'] = unicode(front, 'utf-8')
    card['back'] = unicode(back, 'utf-8')
    return card

def etymologyWrapper(items):
    """Turn etymology items into cards (front: cleaned word, back: the
    etymology text); every other item is skipped."""
    cards = []
    for item in items:
        etymology = item.get('etymology')
        if not etymology:
            continue
        cards.append(getCard(clearStr(item['word']), etymology))
    return cards

def definationWrapper(items):
    """Build definition cards: front is the cleaned word, back the
    definition prefixed with the bracketed POS tag when present.
    Etymology items are skipped."""
    cards = []
    for item in items:
        if item.get('etymology'):
            continue
        back = item.get('defination') or ''
        if item.get('pos'):
            back = "[%s]\n%s" % (item.get('pos'), back)
        cards.append(getCard(clearStr(item['word']), back))
    return cards

def exampleWrapper(items):
    """Emit one card per example sentence, each fronted by the cleaned
    word; items without examples are skipped."""
    cards = []
    for item in items:
        examples = item.get('examples')
        if not examples:
            continue
        front = clearStr(item['word'])
        for sentence in examples:
            cards.append(getCard(front, sentence))
    return cards

def wordWrapper(items):
    """Build word -> pronunciation cards.

    POS-carrying items map the cleaned word to a layout of its IPA
    transcriptions chosen by transcription count; plain term items map
    each cleaned comma-separated sub-term to itself.  Cards are then
    de-duplicated through serialized 'front|back' strings (NOTE: the
    set() pass discards the original card order).  Python 2 only
    (integer division at the pair-count computation).
    """
    cards = []
    for item in items:
        if item.get('etymology'): continue
        if item.get('pos'):
            front = clearStr(item['word'])
            l = len(item['ipas'])
            if l == 0:
                # No transcription available -- echo the word itself.
                back = item['word']
            elif l == 1:
                back = '/%s/' % ''.join(item['ipas'])
            elif l == 3:
                # Three variants: one per line.
                back = '\n'.join(map(lambda x:'/%s/'%x, item['ipas']))
            else:
                try:
                    # 'often' is a known odd case -- force the fallback layout.
                    if front == 'often': raise Exception()
                    # Raises ValueError unless the count is 2/4/6/8,
                    # which also drops into the fallback branch.
                    [2,4,6,8].index(l)
                    back = (l/2)  # Python 2 integer division: pair count
                    back = ['/%s, %s/'] * back
                    back = '\n'.join(back)
                    back = back % tuple(item['ipas'])
                except:#5,6
                    # Fallback: first three IPAs on one line, rest on the next.
                    back  = '/'
                    back += ', '.join(item['ipas'][:3])
                    back += '/\n/'
                    back += ', '.join(item['ipas'][3:])
                    back += '/'
            cards.append(( front, back ))
        else:
            # Plain term items: one identity card per comma-separated term.
            for word in item['word'].split(', '):
                cards.append((clearStr(word), word))
    # De-duplicate via serialized 'front|back' strings (order is lost).
    ser = []
    for card in cards: ser.append('%s|%s' % card)
    ser = list(set(ser))
    cards = []
    for s in ser:
        cards.append(getCard(
            s[:s.index('|')],
            s[s.index('|')+1:]
        ))
    return cards
    
def dump(setConfig, headwords, dumperType='file'):
    """Split *headwords* into decks and dump each through the configured
    card wrapper.

    setConfig  -- one of the SET_* configuration dicts
    headwords  -- list of per-headword item lists
    dumperType -- 'file' for a FileDumper, anything else for Quizlet
    """
    decks = Dumper.splitWordsSet(setConfig['split']['factor'], headwords)
    # Resolve the card-wrapper function by name on this module.
    wrap = getattr(__import__(__name__), setConfig['wrapper'])
    if dumperType == 'file':
        dumper = Dumper.FileDumper(setConfig['title'])
    else:
        dumper = Dumper.QuizletDumper('front', 'back', setConfig.get('template'))
    for index, deck in enumerate(decks):
        title = "%s %d" % (setConfig['title'], index + 1)
        cards = []
        for items in deck:
            cards += wrap(items)
        Dumper.dumpWords(title, cards, dumper)

def handleOALD8():
    """Parse the OALD8 index and per-entry XML files.

    Returns a list with one entry per core headword; each entry is the
    list of item dicts (definition/etymology items) extracted from it.
    Prints summary counters and problem words as a side effect.
    Python 2 only (print statements, unicode()).
    """
    # Index lines: filename<TAB>word<TAB>frequency-tags
    f = open('''D:\Code\hg_local\english-learning-tools\grabber\input\oald8.index''')
    headwords = []
    skipped = []   # NOTE(review): never written -- dead accumulator
    nodef = []     # words whose sense carried no definition text
    noitem = []    # words that yielded no items at all
    category = {}  # NOTE(review): never populated -- dead accumulator
    count = 0           # core entries processed
    countDef = 0        # total items collected
    countExamples = 0   # total example sentences seen
    countEtymology = 0  # total etymology items
    for line in f.readlines():
        config = line.strip().split('\t')
        if len(config) == 2: continue
        filename, word, freqs = config
        # Keep only entries tagged as Oxford-3000 'core'.
        try:
            freqs.index('core')
        except ValueError as e:
            continue
        count +=1
        x = open('''E:\OALD8_Depress\\fs.skn\\files.skn\\'''+filename+'.xml')
        content = x.read()
        x.close()
        #print config
        # NOTE(review): on a parse failure only the filename is printed
        # and the previous iteration's dom is silently reused below.
        try:
            dom = libxml2.parseDoc(content)
        except:
            print filename
        word = getWord(dom)
        """
        for s in dom.xpathEval('//side-panel/@type'):
            s = s.getContent()
            #if s == 'word_family': print word, s
            #if s == 'vocab': print word, s #vocabulary building
            #if s == 'grammar': print word, s
            #if s == 'british_american': print word, s
            #if s == 'more_about': print word, s
            #if s == 'synonyms': print word, s #synonyms
            #if s == 'synald7': print word, s #synonyms
            'verb_forms', 'word_family', 'vocab'
            'which_word', 'grammar', 'british_american'
            'thesaurus', 'synonyms', 'synald7'
            'colloc', 'langbank', 'example_bank',
            'culture', 'more_about'
        """
        # Core-marker census: l = core-flagged nodes that are not homonym
        # markers (hm); p / d count core p-g and dr-g groups separately.
        cores = dom.xpathEval('//*[@core="y"]')
        l = len(cores);
        cores = dom.xpathEval('//hm[@core="y"]')
        h = len(cores);
        l = l-h
        cores = dom.xpathEval('//p-g[@core="y"]')
        p = len(cores);
        cores = dom.xpathEval('//dr-g[@core="y"]')
        d = len(cores);
        # Process each part-of-speech group; entries without p-g groups
        # fall back to the headword group itself.
        parts = dom.xpathEval('/entry/p-g')
        if len(parts) == 0 : parts = dom.xpathEval('/entry/h-g')
        items = []
        for part in parts:
            cores = getList(part)
            """
            print word,
            for core in cores: print core.name,
            print ''
            """
            # Keep only the sense nodes judged to be core vocabulary.
            needs = []
            for core in cores:
                force = getForceCore(l, p, d, part)
                cored = isCore(core, force)
                #print unicode(word, 'utf-8'), force, core.name, cored
                if cored == False: continue
                needs.append(core)
            # A part with core senses contributes its etymology once.
            if len(needs) != 0:
                etymology = getEtymology(part, dom)
                if etymology != '':
                    countEtymology +=1
                    item = {'word':word, 'etymology':etymology}
                    items.append(item)
            for core in needs:
                pos = getPos(core, part, dom)
                term = getTerm(core)
                ipas = getIPAs(core, part, dom, term == word)
                defination = getDefination(core)
                examples = getExamples(core, word)
                countExamples += len(examples)
                if defination == '' :
                    # Derivative groups and 'pole' legitimately lack a
                    # definition; anything else aborts for inspection.
                    if core.name == 'dr-g' or word == 'pole':
                        None
                    else:
                        print core
                        # NOTE(review): debugging exit left in -- makes
                        # the nodef bookkeeping below unreachable.
                        sys.exit()
                        nodef.append(word)
                if core.name == 'dr-g' :
                    item = {'word':term, 'pos':pos, 'ipas':ipas, 'defination':defination, 'examples':examples}
                elif term == '':
                    item = {'word':word, 'pos':pos, 'ipas':ipas, 'defination':defination, 'examples':examples}
                else:
                    # NOTE(review): 'ipas' holds the term string here,
                    # not a list -- dumpItems relies on this for
                    # plain-term items.
                    item = {'word':term, 'ipas':term, 'defination':defination, 'examples':examples}
                items.append(item)
        countDef += len(items)
        headwords.append(items)
        if len(items) == 0 : noitem.append(word)
    f.close()
    # Summary counters and problem-word reports.
    print count
    print countEtymology
    print countDef
    print countExamples
    print len(nodef),
    print unicode(' nodef:'+', '.join(nodef), 'utf-8')
    print len(noitem),
    print unicode(' noitems:'+', '.join(noitem), 'utf-8')
    return headwords

if __name__ == '__main__':
    headwords = handleOALD8()
    #---dumper--------------------------------------
    # Plain-text debug dump of everything that was parsed.
    out = open('oald8_core.txt', 'wb')
    for items in headwords: dumpItems(items, out)
    out.close()
    # Manual toggle: the second assignment wins, so the file dumper is
    # active; comment it out to push to Quizlet instead.
    dumperType = 'quizlet'
    dumperType = 'file'
    dump(SET_WORDS, headwords, dumperType)
    dump(SET_DEFINATIONS, headwords, dumperType)
    dump(SET_ETYMOLOGIES, headwords, dumperType)
    dump(SET_EXAMPLES, headwords, dumperType)
