#coding=utf-8
import codecs
import collections
import json
import os, re
import os.path
import sys
import traceback
import glob
from bs4 import BeautifulSoup
import hashlib
import html2text



# Dedup registry shared across all files processed in this run:
# maps a content hash (word + rendered markdown) -> True once seen.
means_map = {}

# Single shared HTML->markdown converter; links/images/emphasis are
# stripped so the output is plain dictionary text.
html2text2 = html2text.HTML2Text()
html2text2.ignore_links = True
html2text2.ignore_images = True
html2text2.ignore_emphasis = True
#====================================
def parse_html(mdx_file):
    """Extract word/source/markdown triples from an MDX dump file.

    Each entry line in *mdx_file* has the form
    ``@@==@@|word|source|html``.  The HTML payload is converted to
    markdown, physical newlines are escaped to literal ``\\n``,
    duplicate entries (same word + same rendered markdown, tracked in
    the module-level ``means_map``) are skipped, and surviving entries
    are written to ``<mdx_file>.out.md`` as ``word|source|markdown``.

    Malformed lines are logged (traceback to stdout) and skipped.
    """
    # Compile the entry pattern once instead of on every line.
    entry_re = re.compile(r"^@@==@@\|(?P<word>[^\|]+)\|(?P<source>[^\|]+)\|(?P<html>.+)$")
    # `with` guarantees both files are closed even if an exception escapes.
    with open(mdx_file, 'r', encoding='utf8') as tf, \
         open(mdx_file + ".out.md", 'w', encoding='utf8') as output:
        # Iterate the file lazily instead of readlines() (avoids loading
        # the whole dump into memory).
        for line in tf:
            try:
                m = entry_re.search(line)
                if not m:
                    continue
                markdown = html2text2.handle(m.group("html")).strip()
                if not markdown:
                    continue
                # Flatten the entry onto one physical output line.
                markdown = markdown.replace("\r\n", "\\n").replace("\n", "\\n")

                # Skip duplicates: key on word + rendered markdown.
                # (The original also appended an md5 of the same markdown,
                # which added nothing beyond its sha256 — dropped.)
                word = m.group("word")
                h = (hashlib.sha256(word.encode("utf8")).hexdigest()
                     + hashlib.sha256(markdown.encode("utf8")).hexdigest())
                if h in means_map:
                    continue
                means_map[h] = True

                output.write("{}|{}|{}\n".format(word, m.group("source"), markdown))
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C / SystemExit
                # still propagate; keep going on malformed lines.
                print(traceback.format_exc())

# Convert every dump file found in the source directory.
for txt_file in glob.glob("G:\\d\\*.txt"):
    parse_html(txt_file)


