
# -*- coding: utf-8 -*-
"""
    使用textrank4关键字抽取算法则可
"""

from os import path
from abstract.text import parse_pdf , parse_document , parse_txt
from abstract.image import extract_doc , extract_pdf
from textrank4zh import TextRank4Keyword, TextRank4Sentence
from textrank4zh.Segmentation import WordSegmentation   
from abstract.extract_infov4 import extract_keys_lac
from pyhanlp import *
def load_stopwords(path):
    """Load a stopword list from *path* (UTF-8, one word per line).

    Args:
        path: path to the stopword file. (NOTE: the name shadows the
            module-level ``from os import path``; kept for backward
            compatibility with keyword-argument callers.)

    Returns:
        list[str]: the words from the file, or a minimal English
        fallback list when the file cannot be read or decoded.
    """
    try:
        # `with` closes the file automatically; the old explicit
        # f.close() after the with-block was redundant.
        with open(path, encoding='utf-8') as f:
            words = f.read().split('\n')
    except (OSError, UnicodeDecodeError):
        # Narrowed from a bare `except:` so programming errors are no
        # longer silently swallowed; unreadable file -> tiny default.
        return ['a', 'the']
    return words

def filter_keys(keywords: str, stopwords) -> str:
    """Remove stopwords from a '##'-joined keyword string.

    Args:
        keywords: keywords joined with '##'.
        stopwords: container of words to drop. (The original
            annotation said ``str``, but only membership tests are
            performed, so any container of words works.)

    Returns:
        The surviving keywords re-joined with '##'.
    """
    kept = [key for key in keywords.split('##') if key not in stopwords]
    return "##".join(kept)

def extract_keys(document, keys_count=3):
    """Extract up to *keys_count* keywords from *document* with HanLP TextRank.

    Also used for image captions.

    Args:
        document: the text to analyse.
        keys_count: maximum number of keywords to return.

    Returns:
        str: the keywords joined with '##', or the sentinel
        "skip this file!" when extraction fails for any reason.
    """
    try:
        # Force pyhanlp to load the JVM-side TextRank keyword class
        # before calling into it (the binding is otherwise unused).
        JClass("com.hankcs.hanlp.summary.TextRankKeyword")
        keywords = HanLP.extractKeyword(document, keys_count)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        return "skip this file!"
    return '##'.join(keywords)
    

def extract_sents(document, sentences_count=1):
    """Extract a *sentences_count*-sentence summary from *document*.

    Uses HanLP's TextRank summariser. (The old docstring wrongly said
    this extracts keywords.)

    Args:
        document: the text to summarise.
        sentences_count: number of summary sentences to return.

    Returns:
        str: the summary sentences joined with '##', or the sentinel
        "skip this file!" when summarisation fails for any reason.
    """
    try:
        # Force pyhanlp to load the JVM-side TextRank sentence class
        # before calling into it (the binding is otherwise unused).
        JClass("com.hankcs.hanlp.summary.TextRankSentence")
        sentences = HanLP.extractSummary(document, sentences_count)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        return "skip this file!"

    return '##'.join(sentences)
    
# def extract_keys(document,keys_count = 3):
#     """
#        textrank
#        todo: bugs!!
#     """
#     try:
#         tr4w = TextRank4Keyword()
#         tr4w.analyze(text=document, lower=True, window=1)
#         keywords = tr4w.get_keyphrases(keywords_num=keys_count, min_occur_num=1)
#     except:
#         return "skip this file!"
#     return '##'.join(keywords) 
    

# def extract_sents(document , sentences_count = 1):
#     """
#         textrank抽取文档的关键字
#     """
#     try:
#         tr4s = TextRank4Sentence()
#         tr4s.analyze(text=document, lower=True, source = 'all_filters')
#         sentences = tr4s.get_key_sentences(num=sentences_count)
#         sentences = [item['sentence'] for item in sentences]
#     except:
#         return "skip this file!"

#     return '##'.join(sentences) 




def abstract_file(file):
    """Return ``(keywords, summary)`` for a .docx/.pdf/.txt document.

    Args:
        file: path to the document; the extension selects the parser.

    Returns:
        tuple[str, str]: '##'-joined keywords and summary sentences.

    Raises:
        ValueError: for an unsupported extension. (Previously the code
            fell through with `document` unbound and crashed with
            UnboundLocalError.)
    """
    import os
    file_type = os.path.splitext(file)[-1]

    if file_type == '.docx':
        document = parse_document(file)
    elif file_type == '.pdf':
        document = parse_pdf(file)
    elif file_type == '.txt':
        document = parse_txt(file)
    else:
        raise ValueError(f"unsupported file type: {file_type!r}")

    keys = extract_keys(document)
    sents = extract_sents(document)

    return keys, sents



def get_image_path(dirs):
    """Return the path of every file found under *dirs*, recursively.

    Args:
        dirs: root directory to walk.

    Returns:
        list[str]: one path per file, in os.walk order.
    """
    import os
    return [
        os.path.join(parent, filename)
        for parent, _, filenames in os.walk(dirs)
        for filename in filenames
    ]


def get_image(file, tmp_dir):
    """Extract embedded images from a .pdf or .docx file into *tmp_dir*.

    Args:
        file: path to the source document.
        tmp_dir: directory the images are extracted into.

    Returns:
        list[str] | None: the paths of the extracted images, or None
        for unsupported extensions (made explicit — the original fell
        through and returned None implicitly).
    """
    import os
    file_type = os.path.splitext(file)[-1]

    # Dispatch on extension; both branches previously duplicated the
    # extract-then-walk sequence.
    if file_type == '.pdf':
        extract_pdf(file, tmp_dir)
    elif file_type == '.docx':
        extract_doc(file, tmp_dir)
    else:
        # Unsupported container: nothing to extract.
        return None

    return get_image_path(tmp_dir)

    



# if __name__ == '__main__':
#     path = '../im2txt/test-example/test.txt'
#     document = r"据中新网报道，10月1日，国庆假期第一天，安徽黄山风景区天气晴朗，白云飘荡。一大早，数千名游客在迎客松等著名景点前观看升旗仪式，并齐唱国歌。据悉，黄山景区国庆当天接待游客7340人，预计2-4日客流将有所上升。"

#     def test_key(path):
#         print("#"*10)
#         return abstract_file(path)
    
#     test_key(path)

#     def test_abs():
#         print("#"*10)
