# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals

from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Lsa_Summarizer
from sumy.summarizers.luhn import LuhnSummarizer as Luhn_Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words

import re
import math
import sys

# Language key passed to sumy's Tokenizer/Stemmer/get_stop_words helpers.
LANGUAGE = "chinese"
# Default summary length; not referenced anywhere in this file — TODO confirm
# whether external callers rely on it before removing.
SENTENCES_COUNT = 10

# Tentative stop-word list marking clauses that are likely boilerplate
# (admission/discharge phrasing, negated findings); needs refinement later.
stopwords = ['住院', '医院' , '正常', '诊治', '就诊', '没有', '收住', '出院', '否认', '未作', '不详']


def content_check(text):
    """Estimate how likely a clause is a default/boilerplate statement.

    :param text: one clause of the source text (str)
    :returns: a weight in {0, 0.4, 1} — 1 means almost certainly
        boilerplate, 0 means almost certainly informative (terms,
        symptoms, measurements), 0.4 means undecided.
    """
    # An empty clause (e.g. produced by a trailing '。' split) carries no
    # information; flag it as boilerplate so it is never kept.
    if not text:
        return 1
    # Clauses opening with a negation (无/未) or a digit are usually
    # default/absent findings.
    if text[0] == '无' or text[0] == '未' or text[0].isdigit():
        return 1
    # Clauses containing a stop word are usually boilerplate.
    for word in stopwords:
        if word in text:
            return 1
    # ASCII digits or latin letters elsewhere usually mark terms, symptoms
    # or diagnostic data.  ([0-9] instead of \d: \d would also match
    # non-ASCII Unicode digits, unlike the letter class next to it.)
    if re.search(r'[0-9a-zA-Z]', text):
        return 0
    return 0.4


def text_filter(src):
    """Filter likely-boilerplate clauses out of a Chinese clinical text.

    The text is split into clauses on commas/periods; each clause gets a
    boilerplate weight from ``content_check``.  LSA and Luhn summarizers
    (sumy) then pick the most informative clauses.  A clause is kept when
    its weight is 0, or when a summarizer selected it and its weight is
    below 1.

    :param src: raw source text (str)
    :returns: the kept clauses, each terminated with '。' (str)
    """
    # Normalize commas to the sentence separator, then split into clauses.
    src = src.replace('，', '。').replace(',', '。')
    src_l = src.split('。')

    # Per-clause boilerplate weights; their sum estimates how many clauses
    # are boilerplate, so the summarizers are asked for the remainder.
    weight_l = [content_check(substr) for substr in src_l]
    cnt = len(src_l) - math.floor(sum(weight_l))

    # Strip latin letters before handing the text to sumy; the original
    # clauses in src_l keep them for the final output.
    src_sumy = re.sub('[a-zA-Z]', '', src)

    parser = PlaintextParser.from_string(src_sumy, Tokenizer(LANGUAGE))
    stemmer = Stemmer(LANGUAGE)
    # LSA summarizer.
    lsa_summarizer = Lsa_Summarizer(stemmer)
    lsa_summarizer.stop_words = get_stop_words(LANGUAGE)
    # Luhn summarizer.
    luhn_summarizer = Luhn_Summarizer(stemmer)
    luhn_summarizer.stop_words = get_stop_words(LANGUAGE)

    # Selected sentences as plain strings with the trailing '。' dropped.
    lsa_result = [str(s)[:-1] for s in lsa_summarizer(parser.document, cnt)]
    luhn_result = [str(s)[:-1] for s in luhn_summarizer(parser.document, cnt)]

    # Walk the letter-stripped clauses in order and match them against the
    # summarizer outputs, which preserve document order.
    result = ""
    lsa_idx = 0
    luhn_idx = 0
    for i, substr in enumerate(src_sumy.split('。')):
        # Guarded lookups: a summarizer may return fewer than cnt sentences
        # (the original code raised IndexError when the result was empty,
        # e.g. when every clause weighed 1 and cnt was 0).
        lsa_selected = lsa_idx < len(lsa_result) and substr == lsa_result[lsa_idx]
        luhn_selected = luhn_idx < len(luhn_result) and substr == luhn_result[luhn_idx]

        # Keep the clause when its weight is 0, or when both algorithms
        # chose it and the weight is below 1 (strict rule).
        if weight_l[i] == 0 or (lsa_selected and luhn_selected and weight_l[i] < 1):
            result += src_l[i] + "。"
        # Keep it when a single algorithm chose it and the weight is below 1.
        elif (lsa_selected or luhn_selected) and weight_l[i] < 1:
            result += src_l[i] + '。'

        # Consume a matched summary sentence so it is matched at most once
        # (the original kept the cursor on the last sentence, letting a
        # duplicate clause later in the text match again).
        if lsa_selected:
            lsa_idx += 1
        if luhn_selected:
            luhn_idx += 1

    return result


if __name__ == "__main__":
    print(text_filter(sys.argv[1]))
#     # -------------测试一-------------
#     # print('-------------测试一-------------')
#     src = sys.argv[1]
#     # src = '患者2小时前无原因出现腹部疼痛，初表现为上腹部隐痛，3小时后疼痛逐渐加重并转移至右下腹固定，无腰背部及会阴处放散痛，呈阵发性发作，伴有恶心，呕吐数次，呕吐物为胃内容物，量约200ml。未经任何诊治，因腹痛不缓解前来就诊。患者就诊中无咳嗽咳痰，无心悸气短，无呼吸困难，无腹胀腹泻，无尿频尿急及尿血。大小便正常。'
#     print("[src ] " + src.replace('，','。').replace(',','。') + "\n")
#     ecas_result = text_filter(src)
#     print("[ecas] " + ecas_result + "\n")
#     src = src.replace('，','。').replace(',','。')
#     sentence_cnt = len(ecas_result.split("。"))
#
#     parser = PlaintextParser.from_string(src, Tokenizer(LANGUAGE))
#     stemmer = Stemmer(LANGUAGE)
#
#     summarizer = Lsa_Summarizer(stemmer)
#     summarizer.stop_words = get_stop_words(LANGUAGE)
#     result = ""
#     for sentence in summarizer(parser.document, sentence_cnt):
#         result += str(sentence)
#     print("[lsa ] " + result + "\n")
#
#     summarizer = Luhn_Summarizer(stemmer)
#     summarizer.stop_words = get_stop_words(LANGUAGE)
#     result = ""
#     for sentence in summarizer(parser.document, sentence_cnt):
#         result += str(sentence)
#     print("[luhn] " + result + "\n")
#
#     print("   ")
#     # -------------测试二-------------
#     print('-------------测试二-------------')
#     src = '反复咳嗽、咳痰、喘息10年余，加重3天。患者于2006 年春季起受凉后反复咳嗽，咳大量白疲，前往天心社区医院就诊，查胸片示肺纹理增粗，给予头抱类抗生素抗感染治疗后症状逐渐缓解。2006年秋季患者受凉后再次发作上述症状，伴喘息，活动后症状加重，症状于抗感染治疗后1个月逐渐缓解。后上述症状于冬春季节反复发作，每年发作时间约3个月，喘息症状逐渐加重，平地行走时常因呼吸困难而停下休息，休息后可缓解，咳痰量逐渐增多，抗生素抗感染治疗的时间加长。2010年12月患者受凉后再次出现发作性胸闷、喘息，伴明显咳嗽，咳大量白痰，夜间咳嗽症状重，就诊于岳麓医院体检时，查胸片示肺纹理增粗、素乱，查肺功能示残气量增高，诊断为慢性阻塞性肺疾病。'
#     print("[src ] " + src.replace('，','。').replace(',','。') + "\n")
#     ecas_result = text_filter(src)
#     print("[ecas] " + ecas_result + "\n")
#     src = src.replace('，','。').replace(',','。')
#     sentence_cnt = len(ecas_result.split("。"))
#
#     parser = PlaintextParser.from_string(src, Tokenizer(LANGUAGE))
#     stemmer = Stemmer(LANGUAGE)
#
#     summarizer = Lsa_Summarizer(stemmer)
#     summarizer.stop_words = get_stop_words(LANGUAGE)
#     result = ""
#     for sentence in summarizer(parser.document, sentence_cnt):
#         result += str(sentence)
#     print("[lsa ] " + result + "\n")
#
#     summarizer = Luhn_Summarizer(stemmer)
#     summarizer.stop_words = get_stop_words(LANGUAGE)
#     result = ""
#     for sentence in summarizer(parser.document, sentence_cnt):
#         result += str(sentence)
#     print("[luhn] " +result + "\n")

