import sys

import nltk

sys.path.append('../')

import os
from collections import OrderedDict
from nltk.tokenize import sent_tokenize

from resources import BASE_DIR  # BASE_DIR is the directory containing the resources module

class CorpusReader:
    """Read the documents, summaries and references of one topic directory.

    A topic directory lives under ``base_path`` and may contain the
    sub-directories ``input_docs``, ``summaries`` and ``references``.
    Hidden files (names starting with '.') such as .DS_Store are skipped
    everywhere. All files are decoded as UTF-8.
    """

    def __init__(self, topic_path, base_path=BASE_DIR):
        """
        :param topic_path: topic directory, relative to ``base_path``
        :param base_path: corpus root directory (defaults to BASE_DIR)
        """
        self.base_path = base_path
        self.data_path = os.path.join(self.base_path, topic_path)

    def __call__(self, path='input_docs'):
        """Shortcut for :meth:`readDocs`."""
        return self.readDocs(path)

    def readSummaries(self):
        """Return the raw text of every file in <topic>/summaries as a list.

        One list entry per (non-hidden) file, in sorted filename order.
        """
        summary_path = os.path.join(self.data_path, 'summaries')
        summaries = []
        for name in sorted(os.listdir(summary_path)):
            if name.startswith('.'):
                continue  # skip hidden files
            # context manager guarantees the handle is closed even on error
            with open(os.path.join(summary_path, name), 'r', encoding='utf-8') as fh:
                summaries.append(fh.read())
        return summaries

    def readReferences(self):
        """Return [(file_path, [sentence, ...]), ...] for <topic>/references.

        Like :meth:`readSummaries`, but each text is additionally split
        into sentences with NLTK and paired with its file path.
        """
        ref_path = os.path.join(self.data_path, 'references')
        refs = []
        for name in sorted(os.listdir(ref_path)):
            if name.startswith('.'):
                continue  # skip hidden files
            fpath = os.path.join(ref_path, name)
            with open(fpath, 'r', encoding='utf-8') as fh:
                refs.append((fpath, sent_tokenize(fh.read())))
        return refs

    def readDocs(self, path, datatype='CNN'):
        """Return [(file_path, [sentence, ...]), ...] for each document.

        :param path: document directory, relative to the topic directory
        :param datatype: 'CNN' selects :meth:`readCNNDoc`; anything else
                         selects the SGML-style :meth:`readOneDoc` parser
        """
        dpath = os.path.join(self.data_path, path)
        topic_docs = []
        for name in sorted(os.listdir(dpath)):
            if name.startswith('.'):
                continue  # skip hidden files
            fpath = os.path.join(dpath, name)
            if datatype == 'CNN':
                entry = self.readCNNDoc(fpath)
            else:
                entry = self.readOneDoc(fpath)
            topic_docs.append((fpath, entry))
        return topic_docs

    def readOneDoc(self, dpath):
        """Parse one SGML-style document and split it into sentences.

        Keeps only the lines between <TEXT> and </TEXT>, drops bare
        <P>/</P> markers, joins the remainder and sentence-tokenizes it.
        """
        inside_text = False
        text = []
        with open(dpath, 'r', encoding='utf-8') as fh:
            for line in fh:
                if '<TEXT>' in line:
                    inside_text = True
                elif '</TEXT>' in line:
                    break  # nothing after the body is wanted
                elif inside_text:
                    stripped = line.strip()
                    if stripped.lower() not in ('<p>', '</p>'):
                        text.append(stripped)
        return sent_tokenize(' '.join(text))

    def readCNNDoc(self, dpath):
        """Parse one CNN/DailyMail story file and split it into sentences.

        Keeps the article body up to (excluding) the first '@highlight'
        marker, skipping blank lines, then sentence-tokenizes it.
        """
        text = []
        with open(dpath, 'r', encoding='utf-8') as fh:
            for line in fh:
                body = line.rstrip('\n')
                if body == '@highlight':
                    break  # highlights (abstract bullets) follow; stop here
                if body == '':
                    continue  # skip blank separator lines
                text.append(line.strip())
        return sent_tokenize(' '.join(text))





if __name__ == '__main__':
    # One-time setup: fetch the NLTK sentence-tokenizer models.
    # nltk.download('punkt')

    # Example for a generic topic directory:
    # reader = CorpusReader('data/topic_1')
    # docs = reader.readDocs('input_docs')
    # print(docs)

    # Read the CNN data set and print the parsed documents.
    cnn_reader = CorpusReader('data/CNN')
    parsed = cnn_reader.readDocs('input_docs')
    print(parsed)