# -*- coding: utf-8 -*-

from abstract.text import parse_pdf , parse_document , parse_txt
from abstract.image import extract_doc , extract_pdf


def load_stopwords(path):
    """Load a stopword list from *path* (one word per line, UTF-8).

    Falls back to a minimal default list when the file cannot be read,
    so callers always receive a usable list.

    Args:
        path: path to the stopwords text file.

    Returns:
        List of words (may contain an empty trailing entry if the file
        ends with a newline, matching the original behavior).
    """
    try:
        # `with` closes the file automatically; the old explicit
        # f.close() after the with-block was redundant.
        with open(path, encoding='utf-8') as f:
            words = f.read().split('\n')
    except OSError:
        # Narrowed from a bare `except:`; only I/O failures trigger the
        # graceful fallback to a tiny default list.
        return ['a', 'the']
    return words

def filter_keys(keywords: str, stopwords):
    """Drop stopwords from a '##'-separated keyword string.

    Args:
        keywords: keywords joined by '##' (as produced by extract_keys).
        stopwords: collection of words to remove; only membership is
            tested, so a list or set both work. (The old annotation said
            ``str``, which was wrong — a string would match characters.)

    Returns:
        The surviving keywords re-joined with '##'.
    """
    kept = [key for key in keywords.split('##') if key not in stopwords]
    return "##".join(kept)




def extract_keys(document, keys_count=3):
    """Extract up to *keys_count* keywords from *document* with HanLP TextRank.

    Also used for image captions. Relies on the globals ``JClass`` and
    ``HanLP`` (pyhanlp) being importable; when they are missing or the
    extraction fails, the sentinel string "skip this file!" is returned
    instead of raising.

    Args:
        document: plain text to analyse.
        keys_count: maximum number of keywords to return.

    Returns:
        Keywords joined by '##', or "skip this file!" on any failure.
    """
    try:
        # The JClass load is kept for parity with the original code; the
        # actual extraction goes through the HanLP facade.
        TextRankKeyword = JClass("com.hankcs.hanlp.summary.TextRankKeyword")
        keywords = HanLP.extractKeyword(document, keys_count)
    except Exception:
        # Narrowed from a bare `except:` (which would also swallow
        # KeyboardInterrupt/SystemExit). NameError when pyhanlp is not
        # loaded still lands here, preserving the skip behavior.
        return "skip this file!"
    return '##'.join(keywords)
    

def extract_sents(document, sentences_count=1):
    """Extract a *sentences_count*-sentence summary via HanLP TextRank.

    (The old docstring claimed "textcnn keyword extraction"; the code
    actually calls HanLP's TextRank sentence summarizer.)

    Args:
        document: plain text to summarize.
        sentences_count: number of summary sentences to return.

    Returns:
        Sentences joined by '##', or "skip this file!" on any failure.
    """
    try:
        # JClass load kept for parity; the call goes through HanLP.
        TextRankSentence = JClass("com.hankcs.hanlp.summary.TextRankSentence")
        sentences = HanLP.extractSummary(document, sentences_count)
    except Exception:
        # Narrowed from a bare `except:`; a missing pyhanlp (NameError)
        # still triggers the skip sentinel, as before.
        return "skip this file!"
    return '##'.join(sentences)

def abstract_file(file):
    """Return (keywords, summary sentences) for a .docx/.pdf/.txt file.

    Args:
        file: path to the document; the extension selects the parser.

    Returns:
        Tuple ``(keys, sents)`` of '##'-joined strings (either may be
        the sentinel "skip this file!" when extraction fails).

    Raises:
        ValueError: for an unsupported extension. Previously `document`
        was left unbound and the call crashed with UnboundLocalError —
        this was the bug flagged by the old "todo: some bugs to fix".
    """
    import os
    file_type = os.path.splitext(file)[-1]

    if file_type == '.docx':
        document = parse_document(file)
    elif file_type == '.pdf':
        document = parse_pdf(file)
    elif file_type == '.txt':
        document = parse_txt(file)
    else:
        raise ValueError("unsupported file type: {}".format(file_type))

    keys = extract_keys(document)
    sents = extract_sents(document)

    return keys, sents



def get_image_path(dirs):
    """Recursively collect every file under *dirs*.

    Args:
        dirs: root directory to walk.

    Returns:
        List with one joined path per file found (absolute if *dirs*
        is absolute).
    """
    import os
    return [
        os.path.join(parent, filename)
        for parent, _, filenames in os.walk(dirs)
        for filename in filenames
    ]


def get_image(file, tmp_dir):
    """Extract embedded images from *file* into *tmp_dir* (pdf/docx only).

    Args:
        file: path to the source document.
        tmp_dir: directory that receives the extracted images.

    Returns:
        List of paths of the extracted images, or None when the
        extension is neither '.pdf' nor '.docx'.
    """
    import os
    ext = os.path.splitext(file)[-1]

    # Guard clause: unsupported types fall through exactly like the
    # original implicit `return None`.
    if ext not in ('.pdf', '.docx'):
        return None

    extractor = extract_pdf if ext == '.pdf' else extract_doc
    extractor(file, tmp_dir)
    return get_image_path(tmp_dir)

    

# if __name__ == '__main__':

    # keys = ['a' ,'杨靖','第四套', 'cat','dog', '第五单元', '第五集', '第五卷', 
    #     '第五课', '第五年', '第五期', '第五位', '第五元素', '第五组', '召唤', 
    #     '最后一班', '最后一遍', '最后一关', '最后一集', '最后一科', '最后一颗子弹', '最后一派', '最后一题', 
    #     '最后一眼', '最后一页', '10', '11', '12', '35', '25', '2016', '2015', '2014', '又为什么', '有问题吗',
    #      '有问题么', '又喜欢', '有喜欢', '又小', '又笑', '有笑', '有效地', '有一百', '又一遍', '有一部', '又一城', 
    #      '又一村', '有一道', '有意的', '有一']

    # keys = "a##man##is##of##cat"

    # words = load_stopwords('/Users/yangjing/Desktop/UltraSearch/abstract/stopwords.txt')
    # # print(words)

    # print(filter_keys(keys , words))


    # print(filter_key
    # file_path = '/home/ultraman/桌面/example/doc/0.docx'
    # keys , sents = abstract_file(file_path)
    # fmt = "file :{}\nkeys:{}\nsents:{}\n".format(file_path , keys  ,sents)
    # print(fmt)(keys , stopwords=words))

#     file_path = "abstract/test/demov1.py"

    # # # test 
    # file_path = '/home/ultraman/桌面/example/pdf/3.pdf'
    # keys , sents = abstract_file(file_path)
    # fmt = "file :{}\nkeys:{}\nsents:{}\n".format(file_path , keys  ,sents)
    # print(fmt)


