#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' This module contains all the operations needed in extracting query-related
    docs.
'''

import os
import xml.sax
import xml.dom.minidom
from xml.dom.minidom import Node
from xml.parsers.expat import ExpatError
from objects.handler.queryhandler import QueryDocHandler, QueryFileHandler
from general.langconv import Converter
from general.generaltools import SystemTools
from general.generaltools import Debugger
from general.stringoperations import *

def extra_doc(**argv):
    '''Driver: extract and repair all query-related documents.

    Expected keys in argv: 'debug', 'queries_path', 'news_cmp',
    'news_raw', 'news_doc', 'web_cmp', 'web_raw', 'web_doc'.
    Collects each step's error record and hands the whole map to
    Debugger.checkError at the end.
    '''
    error_record = {}
    debugger = Debugger(argv['debug'])

    docids, result = get_docids_from_queries(argv['queries_path'], isDebug=argv['debug'])
    # BUG FIX: this result comes from get_docids_from_queries, not from
    # get_entries_from_docids -- record it under the correct key so the
    # debugger report points at the right step.
    error_record['get_docids_from_queries'] = result

    entries = get_entries_from_docids(docids, isDebug=argv['debug'])
    if argv['debug']:
        # In debug mode, re-read the dumped intermediate files so later
        # steps operate on exactly what was written to disk.
        docids = open('docids')
        entries = open('entries')
    # NOTE(review): the unzip/repair pipeline steps below are disabled.
    '''
    result = unzip_docs_to_raw(entries, argv['news_cmp'], argv['news_raw'],\
            argv['web_cmp'], argv['web_raw'], isDebug=argv['debug'])
    error_record['unzip_docs_to_raw'] = result

    result = repair_raw(argv['news_raw'], isDebug=argv['debug'])
    error_record['repair_news_raw'] = result

    result = repair_raw(argv['web_raw'], isDebug=argv['debug'])
    error_record['repair_web_raw'] = result
    '''
    result = extra_and_repair_doc(entries, docids, argv['news_raw'], argv['news_doc'],\
            argv['web_raw'], argv['web_doc'], isDebug=argv['debug'])
    error_record['extra_and_repair_doc'] = result

    if argv['debug']:
        docids.close()
        entries.close()

    debugger.checkError(error_record)

def get_docids_from_queries(queries_path, isDebug=False):
    '''Input path: Directory which contains XML query files.
       Output: (docids, error_record) -- the set of non-repeated docids
       collected by QueryFileHandler, plus the walker's error record.
    '''
    parser = xml.sax.make_parser()
    handler = QueryFileHandler()
    parser.setContentHandler(handler)

    sys_tools = SystemTools(True)

    def limit(parent, file_name):
        # Only feed *.xml files to the SAX parser.  BUG FIX: the old
        # split('.')[-1] == 'xml' check also accepted a file literally
        # named "xml" (no extension); endswith requires the dot.
        return file_name.endswith('.xml')

    sys_tools.osWalk(queries_path, funct=parser.parse, limit=limit,
                     error=xml.sax._exceptions.SAXParseException)

    if isDebug:
        # Dump the collected ids so a debug re-run can re-read them.
        with open('docids', 'w') as f:
            for docid in handler.setting:
                f.write(docid + '\n')

    return handler.setting, sys_tools.errorRecord

def get_entries_from_docids(docids, isDebug=False):
    '''Map every doc id to the entry (compressed file name stem) it
       belongs to.

       docids: iterable of doc id strings; trailing whitespace tolerated.
       Returns the set of unique entries.
    '''
    setting = {get_entry_from_docid(docid.rstrip()) for docid in docids}

    if isDebug:
        # `with` guarantees the debug dump is flushed and closed.
        with open('entries', 'w') as f:
            for entry in setting:
                f.write(entry + '\n')

    return setting

def unzip_docs_to_raw(entries, news_src="", news_dst="", \
        web_src="", web_dst="", isDebug=False):
    '''Input path is the file storing entries of all query-related files
       unzip all compressed files to proper directory.

       Web entries (prefix "cmn") are tarballs located via guess_path;
       news entries are per-file gzip archives.  Returns the tools'
       error record.
    '''
    sys_tools = SystemTools(isDebug)
    for entry in entries:
        rentry = entry.rstrip()
        if rentry[:3] == "cmn":
            # Web entry: extract the matching tarball.
            if web_src == "" or web_dst == "":
                continue
            elements = rentry.split('-')
            norm_element = normalize_element(elements[2])

            file_path = guess_path(web_src, norm_element, elements)
            if file_path == "":
                # BUG FIX: record the miss AND skip this entry -- the
                # original fell through and ran `tar xf ` with an empty
                # path, producing a spurious shell error.
                sys_tools.add(rentry)
                continue

            sys_tools.customizedMkDir("%s/%s" % (web_dst, norm_element))
            cmd = r"tar xf %s -C %s/%s/" % (file_path, web_dst, norm_element)
            sys_tools.customizedCMD(cmd)

        else:
            # News entry: gunzip <src>/<sub_dir>/<name>.gz into place.
            if news_src == "" or news_dst == "":
                continue
            file_name = rentry.lower()
            sub_dir = file_name[:7]

            sys_tools.customizedMkDir(r"%s/%s" % (news_dst, sub_dir))
            cmd = r"gunzip -c %s/%s/%s.gz > %s/%s/%s" % (news_src, sub_dir, file_name, news_dst, sub_dir, file_name)
            sys_tools.customizedCMD(cmd)

    return sys_tools.errorRecord

def repair_raw(path, isDebug=False):
    '''Walk *path* and patch every raw XML file in place via
       repair_raw_xml; IOErrors are collected, not raised.
    '''
    tools = SystemTools(isDebug)
    tools.osWalk(path, funct=repair_raw_xml, error=IOError)
    return tools.errorRecord

def extra_and_repair_doc(entries, docids, news_src="", news_dst="", \
        web_src="", web_dst="", isDebug=False):
    '''Create all query-related documents.

       Groups doc ids by entry, dispatches each group to the web or news
       extractor, and merges their error records into one set.
    '''
    docid_dict = collectDocid(entries, docids)
    sys_tools = SystemTools(isDebug)
    for key in docid_dict:
        sys_tools.dPrint(key)
        if key[:3] == 'cmn':
            error_record = extra_and_repair_web(*docid_dict[key], src=web_src, dst=web_dst)
        else:
            error_record = extra_and_repair_news(*docid_dict[key], src=news_src, dst=news_dst)
        # BUG FIX: the extractors return None on their early-exit guard
        # (empty src/dst or empty group); set.update(None) would raise
        # TypeError, so only merge a truthy record.
        if error_record:
            sys_tools.errorRecord.update(error_record)
    return sys_tools.errorRecord

def extra_and_repair_web(*docids, src="", dst=""):
    '''Copy and clean the web (.sgm) documents of one entry group.

       docids: doc ids sharing the same entry (same tarball directory).
       Each file is copied to dst, its QUOTE tags repaired, full-width
       chars converted, denoised, and converted to simplified Chinese.
       Returns a set of error records (empty when there is nothing to do).
    '''
    if not src or not dst or not docids:
        # BUG FIX: return an empty set rather than None so callers can
        # unconditionally .update() the result.
        return set()
    converter = Converter('zh-hans')
    sys_tools = SystemTools(True)

    key = get_entry_from_docid(docids[0])
    elements = key.split('-')
    sub_dir_name = normalize_element(elements[2])

    xml_dir = "%s/%s/%s-%s" % (src, sub_dir_name, elements[2], elements[3])
    dst_dir = "%s/%s/%s-%s" % (dst, sub_dir_name, elements[2], elements[3])
    sys_tools.customizedMkDir(dst_dir)
    for file_name in docids:
        print(file_name)
        cmd = "cp %s/%s.sgm %s" % (xml_dir, file_name, dst_dir)
        sys_tools.customizedCMD(cmd)

        dst_file_path = dst_dir + "/%s.sgm" % (file_name)
        repair_quote(dst_file_path)
        # `with` closes both handles even on error (the original leaked
        # the read handle from open(...).read()).
        with open(dst_file_path) as src_file:
            temp_content = qj2bj(src_file.read())
        with open(dst_file_path, 'w') as f:
            try:
                for line in doc_denoise(temp_content, 'web', unify_punctuation):
                    f.write(converter.convert(line))
            except ExpatError as e:
                sys_tools.errorRecord.add(str(e) + '\t' + file_name)
    return sys_tools.errorRecord

def extra_and_repair_news(*docids, src="", dst=""):
    '''Extract and clean the news documents of one entry group.

       docids: doc ids sharing the same entry (same source XML file).
       The XML is SAX-parsed once; each requested doc is denoised and
       converted to simplified Chinese, then written under dst.
       Returns a set of error records (empty when there is nothing to do).
    '''
    if not src or not dst or not docids:
        # BUG FIX: return an empty set rather than None so callers can
        # unconditionally .update() the result.
        return set()
    converter = Converter('zh-hans')
    sys_tools = SystemTools(True)

    key = get_entry_from_docid(docids[0])
    sub_dir_name = key[:-7].lower()

    xml_path = "%s/%s/%s" % (src, sub_dir_name, key.lower())
    dst_dir = "%s/%s/%s" % (dst, sub_dir_name, key.lower())
    sys_tools.customizedMkDir(dst_dir)

    parser = xml.sax.make_parser()
    handler = QueryDocHandler(list(docids))
    parser.setContentHandler(handler)
    try:
        parser.parse(xml_path)
    except xml.sax._exceptions.SAXParseException as e:
        sys_tools.errorRecord.add(str(e))
    for docid in handler.docDict:
        destPath = dst_dir + "/" + docid
        try:
            print(docid)
            # `with` guarantees the handle is closed even if a write
            # raises (the original leaked it on mid-write IOError).
            with open(destPath, 'w') as f:
                temp_content = qj2bj(handler.docDict[docid])
                for line in doc_denoise(temp_content, 'news', unify_punctuation):
                    f.write(converter.convert(line))
        except IOError as e:
            sys_tools.errorRecord.add(str(e))
    return sys_tools.errorRecord



def guess_path(prefix, norm_element, elements):
    '''Resolve the tarball path for a web entry.

       Two on-disk layouts exist: one keyed by norm_element, the other
       by the last two digits of the last element.  The first existing
       candidate wins; returns "" when neither file exists.
    '''
    candidates = (
        "%s/%s/%s-%s.tgz" % (prefix, norm_element, elements[2], elements[3]),
        "%s/%s/%s-%s.tgz" % (prefix, elements[3][-2:], elements[2], elements[3]),
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return ""

def normalize_element(element):
    '''Left-pad a single-character element with '0' so it is two digits;
       any other length is returned unchanged.
    '''
    return '0' + element if len(element) == 1 else element

def repair_raw_xml(file_path):
    '''Add tag to the XML file so that it can be parsed by sax correctly.

       Rewrites *file_path* in place: the original content is wrapped in
       an XML declaration plus a dummy <ZhangSheng> root element.
    '''
    # `with` closes the handles even if read/write raises (the original
    # used bare open/close pairs and leaked on error).
    with open(file_path, 'r') as f:
        cBuffer = f.read()
    with open(file_path, 'w') as f:
        f.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<ZhangSheng comment=\"This tag is added in order to correctly handle xml in sax\">\n")
        f.write(cBuffer)
        f.write("</ZhangSheng>")

def get_entry_from_docid(docid):
    '''Strip the per-document suffix from a doc id, yielding its entry.

       News-style ids end in ".NNNN" (dot at index -5): drop the last 7
       characters.  Otherwise (dash-separated web ids) drop everything
       from the last dash onward.
    '''
    if docid[-5] == '.':
        return docid[:-7]
    return docid[:docid.rfind('-')]

def collectDocid(entries, docids):
    '''Group doc ids by the entry they belong to.

       entries: iterable of entry strings (whitespace tolerated).
       docids:  iterable of doc ids (whitespace tolerated).
       Returns {entry: set-of-docids}, so each XML source file needs to
       be parsed only once when creating query-related documents.
    '''
    docid_dict = {entry.strip(): set() for entry in entries}
    for raw_id in docids:
        docid = raw_id.strip()
        docid_dict[get_entry_from_docid(docid)].add(docid)
    return docid_dict

def repair_quote(path):
    '''This function is used to repair the web XML documents.
       Because their 'QUOTE' tag has no end, which could cause
       error in parsing XML.

       Rewrites *path* in place, turning each opening <QUOTE ...> tag
       into a self-closing one so the file becomes well-formed XML.
    '''
    # Terminator currently searched for, and its self-closing replacement.
    tag = ">"
    nTag = "/>"
    temp = []
    # True while inside a <QUOTE ...> open tag that spans multiple lines:
    # the terminating '>' is expected on some later line.
    inRepair = False
    for line_with_newline in open(path):
        line = line_with_newline.strip()
        if inRepair:
            # Still hunting for the terminator of a multi-line QUOTE tag.
            ptr = line.find(tag)
            if ptr != -1:
                line = line[:ptr] + nTag + line[ptr + len(tag):]
                inRepair = False
        else:
            ptr = line.find("<QUOTE")
            if ptr != -1:
                if line.find("PREVIOUSPOST") != -1:
                    # Attribute form: close after the attribute's closing
                    # quote.  NOTE(review): tag/nTag are never reset to
                    # ">"/"/>" afterwards, so any later plain QUOTE tag in
                    # the same file is matched against '">' -- confirm
                    # this is intended.
                    tag = "\">"
                    nTag = "\"/>"
                pptr = line.find(tag, ptr)
                if pptr != -1:
                    # Terminator on the same line: replace it in place.
                    line = line[:pptr] + nTag + line[pptr + len(tag):]
                else:
                    # Terminator not on this line; keep scanning.
                    inRepair = True
        temp.append(line)
    f = open(path, 'w')
    for line in temp:
        f.write(line.strip() + '\n')
    f.close()

def doc_denoise(doc="", doc_type="", denoise_funct=lambda s : s):
    '''Dispatch *doc* to the denoiser matching *doc_type*.

       Returns "" for an empty doc, a generator for "news"/"web", and
       the document unchanged for any other type.
    '''
    if not doc:
        return ""
    if doc_type == "news":
        return news_denoise(doc, denoise_funct)
    if doc_type == "web":
        return web_denoise(doc, denoise_funct)
    return doc

def news_denoise(doc="", denoise_funct=lambda s : s):
    '''Yield a cleaned, re-tagged version of one news XML document.

       Parses *doc* with minidom and emits: the wrapper header, the DOC
       open tag with its id/type attributes, optional HEADLINE and
       DATELINE (only when exactly one of each exists), then every <P>
       text paragraph, each passed through *denoise_funct*.
    '''
    dom = xml.dom.minidom.parseString(doc)
    yield ('<?xml version="1.0" encoding="utf-8"?>\n'
           '<ZhangSheng comment="This tag is added in order to correctly handle xml in sax">\n')
    doc_node = dom.getElementsByTagName('DOC')[0]
    yield '<DOC id="%s" type="%s">\n' % (doc_node.getAttribute('id'),
                                         doc_node.getAttribute('type'))
    # HEADLINE first, then DATELINE -- order matters downstream.
    for tag_name in ('HEADLINE', 'DATELINE'):
        nodes = dom.getElementsByTagName(tag_name)
        if len(nodes) == 1:
            text = nodes[0].childNodes[0].nodeValue.strip()
            yield "<%s>%s</%s>\n" % (tag_name, denoise_funct(text), tag_name)
    yield "<TEXT>\n"
    for p_node in dom.getElementsByTagName('P'):
        for child in p_node.childNodes:
            if child.nodeType == Node.TEXT_NODE:
                yield "<P>\n%s\n</P>\n" % denoise_funct(child.data.strip())
    yield "</TEXT>\n</DOC>\n</ZhangSheng>"

def web_denoise(doc="", denoise_funct=lambda s : s):
    '''Yield a cleaned, re-tagged version of one web XML document.

       Parses *doc* with minidom and re-emits DOCID, optional DOCTYPE,
       DATETIME, HEADLINE and every TEXT/POST section, passing each text
       value through *denoise_funct*.  parseString raises ExpatError on
       malformed input (callers catch it); a missing required element
       raises IndexError from the [0] lookups.
    '''
    doc = xml.dom.minidom.parseString(doc)
    # Wrapper root so downstream SAX parsing sees one document element.
    yield """<?xml version="1.0" encoding="utf-8"?>
<ZhangSheng comment="This tag is added in order to correctly handle xml in sax">
<DOC>
"""
    # DOCID, DATETIME and HEADLINE are treated as required.
    docidElement = doc.getElementsByTagName('DOCID')[0]
    yield "<DOCID>%s</DOCID>\n" % \
            (denoise_funct(docidElement.childNodes[0].nodeValue.strip()))
    doctypeElements = doc.getElementsByTagName('DOCTYPE')
    if len(doctypeElements) == 1:
        doctype = doctypeElements[0].childNodes[0].nodeValue.strip()
        yield """<DOCTYPE SOURCE="%s">%s</DOCTYPE>\n""" % \
                (doctypeElements[0].getAttribute('SOURCE'), denoise_funct(doctype))
    datetimeElement = doc.getElementsByTagName('DATETIME')[0]
    datetime = datetimeElement.childNodes[0].nodeValue.strip()
    yield "<DATETIME>%s</DATETIME>\n" % (denoise_funct(datetime))
    yield "<BODY>\n"
    headlineElement = doc.getElementsByTagName('HEADLINE')[0]
    headline = headlineElement.childNodes[0].nodeValue.strip()
    yield "<HEADLINE>%s</HEADLINE>\n" % (denoise_funct(headline))
    for textElement in doc.getElementsByTagName('TEXT'):
        yield "<TEXT>\n"
        for postElement in textElement.getElementsByTagName('POST'):
            yield "<POST>\n"
            # POSTER / POSTDATE / QUOTE are optional; each is emitted
            # only when exactly one is present in the POST.
            posterElements = postElement.getElementsByTagName('POSTER')
            if len(posterElements) == 1:
                poster = posterElements[0].childNodes[0].nodeValue.strip()
                yield "<POSTER>%s</POSTER>\n" % \
                        (denoise_funct(poster))
            postdateElements = postElement.getElementsByTagName('POSTDATE')
            if len(postdateElements) == 1:
                postdate = postdateElements[0].childNodes[0].nodeValue.strip()
                yield "<POSTDATE>%s</POSTDATE>\n" % \
                        (denoise_funct(postdate))
            quoteElements = postElement.getElementsByTagName('QUOTE')
            if len(quoteElements) == 1:
                # QUOTE is re-emitted self-closed, keeping only its
                # PREVIOUSPOST attribute (repair_quote already made the
                # source tag self-closing).
                previouspost = quoteElements[0].getAttribute('PREVIOUSPOST')
                yield """<QUOTE PREVIOUSPOST="%s"/>\n""" % \
                        (denoise_funct(previouspost))
            # Direct text children of POST are the post body lines.
            linesNodes = postElement.childNodes
            if len(linesNodes) != 0:
                for linesNode in linesNodes:
                    if linesNode.nodeType == Node.TEXT_NODE:
                        if linesNode.data == '\n':
                            # Skip pure-newline whitespace nodes.
                            continue
                        yield denoise_funct(linesNode.data.strip()) + '\n'
            yield "</POST>\n"
        yield "</TEXT>\n"
    yield "</BODY>\n</DOC>\n</ZhangSheng>"

if __name__ == "__main__":
    # Library module: no CLI entry point; callers import extra_doc().
    pass
