#! /usr/bin/python  
# -*- coding: utf-8-*-
"""
此文件负责依据vocabulary.txt，然后读取当前目录下的解释文件，
合并生成db.txt数据库
有任何问题，请联系baizhenxuan@gmail.com
"""
from  urllib import *
import os,sys 
from lxml import etree;
def getword(word):
    """
    Download the dict.cn mini definition page for *word* and cache the
    raw HTML in a file named after the word in the current directory.

    Skips the download entirely when the cache file already exists.
    """
    if os.path.exists(word):
        return
    url = "http://dict.cn/mini.php?q=" + word
    # Close the HTTP response and the cache file deterministically
    # (the original leaked both handles).
    resp = urlopen(url)
    try:
        html = resp.read()
    finally:
        resp.close()
    print(word)
    # NOTE(review): file is Python 2, so text-mode "w+" accepts the raw
    # byte string; under Python 3 this would need binary mode.
    with open(word, "w+") as cache:
        cache.write(html)
db=open("db.txt","w+");
def removeobjectAndscript(b):
    """
    Recursively delete every <object> and <script> element under *b*,
    mutating the tree in place.

    Children are snapshotted with list() before removal so iteration is
    safe (getchildren() is deprecated in lxml and was removed from
    xml.etree in Python 3.9).
    """
    for child in list(b):
        name = child.tag
        # lxml comment / processing-instruction nodes have a non-string
        # .tag (a callable); calling .lower() on it would raise, so only
        # test real element tags.
        if isinstance(name, str) and name.lower() in ("object", "script"):
            b.remove(child)
        else:
            removeobjectAndscript(child)
def removeroot(root):
    """
    Strip unwanted structure from a parsed dict.cn mini page, in place.

    Drops the original <head>, removes the body's last <div>, deletes
    all <object>/<script> nodes, then prepends a fresh <head> carrying a
    utf-8 Content-Type declaration.

    NOTE(review): assumes root's first child is <head> and the next is
    <body> — plausible for lxml's HTMLParser output, but not validated.
    """
    #print "root:",root.getchildren();
    h=root.getchildren()[0];
    root.remove(h);
    b=root.getchildren()[0];
    l=b.getchildren();
    b.remove(l[-1]); # remove the last div; it holds the "add to vocabulary book / see full definition" links
    removeobjectAndscript(b);
    # The http-equiv/content attributes are set on <head> itself rather
    # than on a <meta> child — presumably intentional for the consumer
    # of db.txt; TODO confirm.
    h=etree.SubElement(root,"head");
    h.attrib['http-equiv']='Content-Type';
    h.attrib['content']='text/html; charset="utf-8"';
    # SubElement appended h at the end; insert(0, ...) relies on lxml
    # moving an element that is already in the tree to the front.
    root.insert(0,h);
def compressUseless(file):
    """
    Parse an HTML file object, strip the useless nodes (head, scripts,
    objects and the trailing links div) via removeroot(), and return the
    cleaned page serialized as utf-8 HTML.
    """
    tree = etree.parse(file, etree.HTMLParser())
    removeroot(tree.getroot())
    return etree.tostring(tree, encoding='utf-8', method="html")
tag="!$$-^";
def writedb(word):
    """
    Append one word's entry to the db file, delimited by ``tag``.

    Entry layout: word + newline + tag + cleaned HTML + tag.  The HTML
    comes from the cached per-word file, cleaned by compressUseless()
    (head/script/object nodes and the trailing links div removed).
    """
    print("write word: %s" % word)
    db.write(word + "\n")
    db.write(tag)
    try:
        # Context manager guarantees the cached HTML file is closed
        # (the original leaked the handle).
        with open(word, "r") as cached:
            db.write(compressUseless(cached))
    except Exception:
        # Best effort: a missing or malformed cache file must not abort
        # the whole batch.  Unlike the original bare except, this does
        # not swallow KeyboardInterrupt/SystemExit.
        print("exception: %s" % word)
    db.write(tag)
if __name__ == "__main__":
    # Build db.txt from every word listed in vocabulary.txt
    # (one word per line; surrounding whitespace is stripped).
    with open("vocabulary.txt", "r") as vocab:
        lines = vocab.readlines()
    print(len(lines))
    #writedb('a');
    for line in lines:
        writedb(line.strip())
    db.close()
