# -*- coding: utf8 -*-

import pickle
import xml.sax
from objects.document import Web, News
from objects.handler.queryhandler import QueryExtractionHandler
from general.fileoperations import *
from general.stringoperations import ngram
from general.mysqloperations import MySQL
from general.generaltools import SystemTools, Debugger

def init_cd():
    """Initialize the pipeline: corpus path, queries, lookup tables, DB.

    Returns a 6-tuple ``(data_path, queries_dict, keyword_id_dict,
    entity_id_dict, query_info_dict, mysql)`` where ``mysql`` is an
    already-connected MySQL wrapper.
    """
    corpus_root = "/home/fengys/data/Chinese_Queries_Related_Source_Corpus/data/new"
    #corpus_root = "/Users/EvanCheung/program/data"

    checker = Debugger(True)
    parse_errors, queries_dict = get_queries_from_xml('/home/fengys/data/Chinese_Queries_Related_Source_Corpus/data/new/query', True)
    #parse_errors, queries_dict = get_queries_from_xml('/Users/EvanCheung/program/data/query', True)
    # Report any per-file XML parse failures collected during the walk.
    checker.checkError({'get_queries_from_xml': parse_errors})

    keyword_id_dict, entity_id_dict, query_info_dict = load_db()

    db_conn = MySQL(use_db=True)
    db_conn.connect()

    return corpus_root, queries_dict, keyword_id_dict, entity_id_dict, query_info_dict, db_conn

def get_queries_from_xml(xml_path, isDebug=False):
    """Parse every ``*.xml`` file under ``xml_path`` and extract queries.

    Walks the directory tree rooted at ``xml_path``, feeding each XML file
    to a SAX parser driven by a ``QueryExtractionHandler``.

    :param xml_path: root directory to walk for query XML files
    :param isDebug: enable debug output in the handler and walker
    :returns: tuple ``(error_record, queries)`` — the walker's
        ``errorRecord`` and the handler's accumulated query dict
    """
    parser = xml.sax.make_parser()
    handler = QueryExtractionHandler(isDebug)
    parser.setContentHandler(handler)

    def file_type_limit(parent, file_name):
        # Keep the original extension test (last dot-separated component)
        # so a bare file literally named 'xml' is still accepted.
        return file_name.split('.')[-1] == 'xml'

    sys_tools = SystemTools(isDebug)
    # error= presumably tells osWalk which exception to catch and record
    # per file (errorRecord is returned below) — confirm in SystemTools.
    # Use the public xml.sax.SAXParseException name instead of reaching
    # into the private xml.sax._exceptions module.
    sys_tools.osWalk(xml_path, funct=parser.parse, limit=file_type_limit,
                     error=xml.sax.SAXParseException)

    return sys_tools.errorRecord, handler.dict

def get_document(docid, data_path, isDebug=False):
    """Build the News or Web document object identified by ``docid``.

    Resolves the plain-text and POS-tagged paths from ``data_path``,
    constructs the matching document type, and populates its lines.
    Returns ``False`` when either text source is empty.
    """
    doc_type, relative_path = docid2path(docid)
    doc_cls = News if doc_type == 'news' else Web
    doc = doc_cls(docid=docid,
                  plain_text_path=get_plain_path(data_path, relative_path),
                  postag_text_path=get_postag_path(data_path, relative_path),
                  isDebug=isDebug)

    # Bail out (with the legacy False sentinel) if either source is missing.
    if doc.plain_text == "":
        if isDebug:
            print("\tError: No Text")
        return False
    if doc.postag_text == "":
        if isDebug:
            print("\tError: No Pos tag Text")
        return False

    doc.get_lines()
    return doc

def init_query(query, entity_id_dict, keyword_id_dict, query_info_dict, doc):
    """Attach the lookup tables to ``query`` and locate it inside ``doc``."""
    query.entity_db, query.keyword_db, query.query_db = (
        entity_id_dict, keyword_id_dict, query_info_dict)
    query.update_query_location(doc)

def load_db():
    """Load the pickled keyword, entity and query-info lookup tables.

    Paths are relative to the process working directory. Context managers
    guarantee the file handles are closed even if unpickling raises
    (the original open/close pairs leaked the handle on error).

    NOTE: pickle is only safe on trusted, locally generated files.

    :returns: tuple ``(keyword_id_dict, entity_id_dict, query_info_dict)``
    """
    with open('./general/db/keyword-id.db', 'rb') as db_file:
        keyword_id_dict = pickle.load(db_file)
    with open('./general/db/entity-id.db', 'rb') as db_file:
        entity_id_dict = pickle.load(db_file)
    with open('./general/db/query_info.db', 'rb') as db_file:
        query_info_dict = pickle.load(db_file)
    return keyword_id_dict, entity_id_dict, query_info_dict


if __name__ == "__main__":
    # Ad-hoc driver: parse the TAC query XML files, then (in the dead code
    # below) look up n-gram keyphraseness scores around each query mention.
    error_record = {}
    debugger = Debugger(True)
    #data = "/home/fengys/data/Chinese_Queries_Related_Source_Corpus/data/zs"
    data = "/Users/EvanCheung/program/data"
    result, queries_dict = get_queries_from_xml('tac')
    error_record['get_queries_from_xml'] = result
    # NOTE(review): `anonymous` is not defined in this file; presumably it
    # comes from one of the star imports above (general.fileoperations?) --
    # verify, otherwise this line raises NameError at runtime.
    anonymous(data, queries_dict, True)
    debugger.checkError(error_record)
    import sys
    sys.exit(0)

    # ------------------------------------------------------------------
    # DEAD CODE: everything below is unreachable because of sys.exit(0)
    # above; it looks like an older inline version of the per-document
    # pipeline, kept for reference.
    # ------------------------------------------------------------------
    parser = xml.sax.make_parser()
    handler = QueryExtractionHandler()
    parser.setContentHandler(handler)
    #parser.parse('ttt')
    parser.parse('tac/tac_2012_kbp_chinese_entity_linking_web_training_queries.xml')
    parser.parse('tac/tac_2012_kbp_chinese_evaluation_entity_linking_queries_v1.1.xml')
    parser.parse('tac/tac_2011_kbp_cross_lingual_evaluation_entity_linking_queries.xml')
    parser.parse('tac/tac_2011_kbp_cross_lingual_evaluation_entity_linking_queries_v1.1.xml')
    count = 100000  # cap on how many documents to process
    mydb = open('content-id.db', 'rb')
    content_id = pickle.load(mydb)  # maps n-gram string -> content id (used below)
    mysql = MySQL()
    mysql.connect()
    for key in handler.dict:
        if count < 0:
            break
        count -= 1
        doc_type, relative_path = docid2path(key)
        plain_text_path = data + '/doc/' + relative_path
        postag_text_path = data + '/doc-pro/' + relative_path
        # News and Web documents share the directory layout but are parsed
        # by different document classes.
        if doc_type == 'news':
            doc = News(docid=key, plain_text_path=plain_text_path, \
                    postag_text_path=postag_text_path, isDebug=True)
        else:
            doc = Web(docid=key, plain_text_path=plain_text_path, \
                    postag_text_path=postag_text_path, isDebug=True)
        if doc.plain_text == "":
            print("\tError: No Text")
            continue
        if doc.postag_text == "":
            print("\tError: No Pos tag Text")
            continue
        doc.get_lines()
        for q in handler.dict[key]:
            # Locate the query mention in the raw text, the sentence list,
            # and the segmented text.
            doc.update_query_plain_text_location(q)
            doc.update_query_sentences_location(q)
            doc.updata_segmentation_location(q)  # sic: typo'd method name lives in the document class
            print (q)
            doc.print_query_context(q)

            ngram_list = []
            id_list = []
            result_list = []
            # Collect up-to-5-grams from every sentence in the query context.
            for line_num in doc.get_query_context(q):
                ngram_list += ngram(doc.segmentalized_sentences[line_num], 5)
                #print(''.join(doc.segmentalized_sentences[line_num]))
            # Deduplicate n-grams and keep only those with a known content id.
            for w in sorted(set(ngram_list)):
                if w in content_id:
                    id_list.append(str(content_id[w]))
            for cid, cc, ckp in mysql.get_keyphraseness(*id_list):
                result_list.append((cid, cc, ckp))
            if result_list:
                print()
                print("{0:<12}\t{2:<20}\t{1:<}".format('id', 'keyword', 'keyphraseness'))
            # Print rows as id / keyphraseness / keyword (matching the
            # header's {0}/{2}/{1} ordering), highest keyphraseness first.
            for cid, cc, ckp in sorted(result_list, key=lambda r: -r[2]):
                print("{0:<12d}\t{2:<20F}\t{1:<}".format(cid, cc, ckp))

            if doc.bugInfo != "":
                doc.dPrint(doc.bugInfo)
            print("\n")
    mysql.release()
    mydb.close()
