﻿import os
import sys
import Dumper
import xml.dom.minidom
import re

# Column keys shared by the parser output and the dumpers.
COL_GSL = 'GSL'
COL_WORD = 'word'
COL_DEF = 'def'
COL_IPA = 'IPA'
COL_IDX = 'IDX'
# Candidate split sizes handed to the dumper (currently only 50).
SPLIT_FACTOR = range(50, 51)

# Configuration describing the GSL (General Service List) word set:
# input file, the parser function's name, the output title, and the
# Quizlet template metadata.
SET_WORD_GSL = {
    'file': Dumper.encodeStr('input\\wordlist_basic.xml'),
    'parser': Dumper.encodeStr('GSL_TW_Parser'),
    'title': Dumper.encodeStr('GSL 中文釋義'),
    'split': {'factor': SPLIT_FACTOR},
    'template': {'tags': 'English, EFL, ESL, GSL', 'description': '中正大學語言中心'},
}

def GSL_TW_Parser(content):
    """Parse the GSL wordlist XML (a pdfminer-style dump) into word records.

    content -- XML text containing <page>/<textbox> elements whose 'bbox'
               attribute is "left,top,right,bottom".
    Returns a list of dicts with keys COL_IDX (int), COL_WORD and COL_DEF.
    """
    doc = xml.dom.minidom.parseString(content.encode('utf-8'))
    # Field positions inside the (text, left, top) node tuples built below.
    # (The original reused the names txt/left/top as indices via
    # `range(len(nodes[0]))`, which also crashed on a page with no nodes.)
    TXT, LEFT, TOP = 0, 1, 2
    # Each page holds two physical columns; map a textbox's left-edge x
    # coordinate to its logical column (index / word / definition).
    # The coordinates were measured from the source PDF.
    columns = []
    columns.append({28: COL_IDX, 29: COL_IDX, 34: COL_IDX, 72: COL_WORD, 172: COL_DEF})
    columns.append({309: COL_IDX, 352: COL_WORD, 452: COL_DEF})
    reg = re.compile('^[0-9]+')  # leading numeric index of a row
    pages = doc.getElementsByTagName('page')
    words = []
    for p in range(len(pages)):
        page = pages[p]
        textboxs = page.getElementsByTagName('textbox')
        # Collect each textbox as a (text, left, top) tuple, skipping empty
        # boxes and the footer credit line.
        nodes = []
        for textbox in textboxs:
            txt = Dumper.getText(textbox).strip()
            position = textbox.getAttribute('bbox')
            (left, top, right, bottom) = map(int, map(round, map(float, position.split(','))))
            if txt and Dumper.encodeStr('本份單字列表由國立中正大學語言中心整理') not in txt:
                nodes.append((txt, left, top))
        # Group this page's nodes per physical column.
        for c in range(len(columns)):
            column = columns[c]
            grouped = {COL_IDX: [], COL_WORD: [], COL_DEF: []}
            for node in nodes:
                # Boxes whose left edge matches no known column are ignored.
                col = column.get(node[LEFT])
                if col is not None:
                    grouped[col].append(node)
            for col in grouped:
                # Sort by the top coordinate so rows pair up positionally.
                # (key= form; the old `sort(None, lambda...)` is Py2-only.)
                grouped[col].sort(key=lambda n: n[TOP])
            colwords = []
            if p == 0 and c == 1:
                # Page 1's second column has two irregular rows: drop the
                # stray definition box and insert both records by hand.
                grouped[COL_DEF] = grouped[COL_DEF][1:]
                colwords.append({COL_IDX: 38, COL_WORD: Dumper.encodeStr('afford'), COL_DEF: Dumper.encodeStr('買得起')})
                colwords.append({COL_IDX: 76, COL_WORD: Dumper.encodeStr('anger'), COL_DEF: Dumper.encodeStr('生氣')})
            # Regular rows: the i-th definition pairs with the i-th index
            # box.  The index box usually carries "NN word"; when the word
            # part is missing, pull it from the word column instead.
            wordIndex = 0
            for i in range(len(grouped[COL_DEF])):
                txt_def = grouped[COL_DEF][i][TXT]
                tmp = grouped[COL_IDX][i][TXT].strip()
                txt_idx = reg.findall(tmp)[0]
                txt_word = tmp[tmp.index(txt_idx) + len(txt_idx):].strip()
                if len(txt_word) == 0:
                    txt_word = grouped[COL_WORD][wordIndex][TXT]
                    wordIndex += 1
                colwords.append({COL_IDX: int(txt_idx), COL_WORD: txt_word, COL_DEF: txt_def})
            words += colwords
    return words
    
def getSetByPDF(setConfig):
    """Read the configured wordlist file, parse it, and dump it to Quizlet.

    setConfig -- dict with keys 'file' (input path), 'parser' (name of a
                 parser function defined in this module), plus 'title',
                 'split' and optional 'template' consumed by the dumper.
    """
    # 'with' guarantees the handle is closed even if read/encode raises;
    # the original left the file open on error.
    with open(setConfig['file']) as f:
        content = Dumper.encodeStr(f.read())
    # Resolve the parser function by name on this module.  sys.modules is
    # more robust than __import__(__name__), which returns the top-level
    # package for dotted module names.
    parser = getattr(sys.modules[__name__], setConfig['parser'])
    words = parser(content)
    dumper = Dumper.QuizletDumper(COL_WORD, COL_DEF, setConfig.get('template'))
    Dumper.subConfigOutput(None, setConfig, words, None, COL_WORD, dumper)

if __name__ == '__main__':
    # Script entry point: parse and dump the GSL word set.
    getSetByPDF(SET_WORD_GSL)
