# -*- encoding: utf-8 -*-

#File    :   Seg_Sents_En.py
#Time    :   2020/09/12 14:18:14
#Author  :   Leo Wood 
#Contact :   leowood@foxmail.com


import spacy
import re
nlp = spacy.load("en_core_sci_sm")

# Non-greedy "[...]" matcher; re.S lets a bracketed span cross newlines.
# Used by seg_sens to collect bracketed (citation-style) spans from the text.
medium = re.compile(r"\[.*?\]", re.S)
def rep(rawstr, dict_rep):
    """Return *rawstr* with every key of *dict_rep* replaced by its value.

    Replacements are applied sequentially in the dict's insertion order,
    so later replacements see the results of earlier ones.
    """
    for old, new in dict_rep.items():
        rawstr = rawstr.replace(old, new)
    return rawstr

# Placeholder maps used by seg_sens: dict2 masks period-bearing abbreviations
# with opaque tokens before spacy sentence-splits the text (so the periods are
# not taken as sentence boundaries), and dict1 is its inverse, restoring the
# abbreviations afterwards.
# NOTE(review): the "et?al." key looks like a regex leftover — str.replace
# treats it literally, so a plain "et al." is never actually masked; confirm.
dict1 = {"Ffff": "Drs.", "gggg": "etc.", "ssss": "et?al.", "Kkkk": "Dr.", "jjjj": "vs.", "Hhhh":"Ref."}
dict2 = {"Drs.": "Ffff", "etc.": "gggg", "et?al.": "ssss", "Dr.": "Kkkk", "vs.": "jjjj","Ref.": "Hhhh"}
# , "hhhh": ". ["
def seg_sens(text):
    """Split English text into sentences with spacy, then repair mis-splits.

    Abbreviations in ``dict2`` are masked with placeholder tokens before
    spacy runs and restored via ``dict1`` before returning.  Repair passes
    then merge fragments spacy split incorrectly: bare list indices
    ("1." .. "9."), sentences broken after "+/-", unbalanced "[]"/"()"/
    curly quotes, leading citation brackets like "[40]", and sentences
    that start with a lowercase letter.

    :param text: raw input text.
    :return: list of repaired sentence strings (empty list for empty input;
             the original raised UnboundLocalError in that case).
    """
    bracket_spans = re.findall(medium, text)
    # Strip spaces/commas/dashes so a numeric citation such as "[1, 2-3]"
    # reduces to "[123]" and is detected by the isdigit() check below.
    vim_list = [k.replace(" ", "").replace(",", "").replace("–", "").replace("-", "")
                for k in bracket_spans]
    texts = rep(text, dict2)  # mask abbreviations like "Dr." / "etc."
    sentences = [str(sen) for sen in nlp(texts).sents]

    indexs = ['1.', '2.', '3.', '4.', '5.', '6.', '7.', '8.', '9.']

    changed = True
    while changed:
        changed = False
        for i, sen in enumerate(sentences):
            sen_pre = sentences[i - 1] if i > 0 else ''
            sen_next = sentences[i + 1] if i < len(sentences) - 1 else ''

            if not sen:
                # Guard: an empty fragment would crash the sen[0]/sen[-3:]
                # checks below; there is nothing to repair, skip it.
                continue

            # Bare list index: glue it onto the following (or previous)
            # sentence.  A lone index with no neighbor is left untouched.
            if sen in indexs and (sen_next or i > 0):
                if sen_next:
                    sentences[i + 1] = sen + ' ' + sen_next
                else:
                    sentences[i - 1] = sen_pre + ' ' + sen
                # Delete by position: remove() would drop the FIRST equal
                # element, which may not be this one.
                del sentences[i]
                changed = True
                break

            # Sentence broken right after "+/-": merge with the next one.
            # (The original indexed sentences[i+1] unconditionally and
            # crashed when the LAST sentence ended with "+/-".)
            if sen[-3:] == "+/-" and sen_next:
                sentences[i + 1] = sen + ' ' + sen_next
                del sentences[i]
                changed = True
                break

            # Previous sentence left a "[" unbalanced: pull this one back.
            # (count on '' is 0, so this never fires for i == 0.)
            if "]" in sen and (sen_pre.count("[") + sen_pre.count("]")) % 2 == 1:
                sentences[i - 1] = sen_pre + ' ' + sen
                del sentences[i]
                changed = True
                break

            # Citation bracket split onto its own sentence, e.g. "[40] Next":
            # attach "[...]" to the previous sentence, keep any remainder as
            # its own sentence.
            if sen[0] == '[' and any(s.isdigit() for s in vim_list):
                if "]" in sen and i > 0:
                    close_pos = sen.index("]")
                    sentences[i - 1] = sen_pre + ' ' + sen[:close_pos + 1]
                    del sentences[i]
                    if close_pos + 1 != len(sen):
                        sentences.insert(i, sen[close_pos + 1:])
                    changed = True
                    break
                # The original set its flag and broke here even when nothing
                # was modified, which looped forever; fall through instead.

            # Lowercase first letter: spacy split mid-sentence, merge back.
            # (With i == 0 the original wrote to sentences[-1], clobbering
            # the LAST sentence; now it only merges with a real predecessor.)
            if 'a' <= sen[0] <= 'z' and i > 0:
                sentences[i - 1] = sen_pre + ' ' + sen
                del sentences[i]
                changed = True
                break

            # Previous sentence left a "(" unbalanced: pull this one back.
            if ")" in sen and (sen_pre.count("(") + sen_pre.count(")")) % 2 == 1:
                sentences[i - 1] = sen_pre + ' ' + sen
                del sentences[i]
                changed = True
                break

            # Previous sentence left a curly quote unbalanced.
            if "”" in sen and (sen_pre.count("“") + sen_pre.count("”")) % 2 == 1:
                sentences[i - 1] = sen_pre + ' ' + sen
                del sentences[i]
                changed = True
                break

    # Unmask the abbreviations; an empty list is returned as-is.
    return [rep(s, dict1) for s in sentences]

if __name__ == '__main__':
    # Manual smoke tests: uncomment one `text` sample and the print call.
    # NOTE: the third sample below spans three physical lines; the last two
    # were previously left uncommented, which made importing this module a
    # SyntaxError.  They are now commented out so the file parses.
    pass
    # text = "Léveillard et al. [56] identified a rod-derived cone viability factor (RdCVF) that appears to be a truncated thioredoxin-like protein which significantly delays cone death in the rd1 mouse model of RP. Studies are ongoing to test whether this factor will be efficient in other forms of RP. [40]"
    # text = "Meltwater exits the cavity beneath the Dotson Ice Shelf south of the polynya (fig. S2A), but the core of this outflow is located on the western edge of the polynya (12) and therefore likely has less impact on convection in the Amundsen Polynya.   Basal melt of upstream ice shelves therefore provides sufficient freshwater input to compensate a large fraction of the salt flux released during sea ice formation in the Amundsen Polynya. [40] As a result, winter convection does not extend to the seafloor, no DSW is formed, and warm MCDW flowing through the Dotson Trough (fig. S2A) can reach the Dotson (12) and Getz ice shelves (13) and drive rapid basal melt."
    # text = "Following the QC and clumping procedures, the number of SNPs used in the construction of the PRS was 84,270 (Correction 1: 80,167 SNPs; Correction 2: 80,130 SNPs). [Correction added on 14 Nov 2019 after first online publication: in the preceding sentence, the text in the parentheses has been added.]. The results show that the PRS trained on the SLI sample predicts some risk of the broad language phenotype and significantly predicts some of the risk of the narrow language phenotype (SLI), with adjusted R\n2 = 4% and 6.24%, P = 0.051 and 0.024, respectively (in both cases the association with PRS was positive i.e. the regression coefficient for PRS was positive). However, it does not predict risk of ASD or ADHD (adjusted R\n2 = 0.0004%, 0.01%; P = 0.984, 0.889, respectively), as can be seen in Figure 3 and Table 1, which also shows the different sample sizes. The results for the corrected analyses showed similar trends, see Table 1 for details. [Correction added on 14 Nov 2019 after first online publication: the preceding sentence was added.] The PRS analysis with height as the target phenotype included 274 children with confirmed non‐missing height phenotype and covariate for age at measurement and resulted in an R\n2 of 0.2% (P = 0.452). The results for the new analyses for height were: R\n2 of 0.019%, 0.061%; P = 0.814, 0.676, for Corrections 1 and 2, respectively. [Correction added on 14 Nov 2019 after first online publication: the preceding sentence was added.] The above two‐sided P‐values are based on the degree to which the PRS regression coefficients are different from zero, estimated using a Wald test (a t‐test in the case of height, as it is estimated in a linear regression; otherwise, the normal distribution is used to obtain the P‐value for the coefficient from a logistic regression). 
    # Figure 3 also shows the odds ratios from the logistic regression coefficients and their confidence intervals, computed in R v3.4.2 [R Core Team, 2014] using PRS normalized across all children for the four neurodevelopmental phenotypes. The TROG‐2 thresholds chosen for defining cases and controls conform to thresholds used in previous SLI Consortium studies and to the qualitative assessment in the Danish version of the TROG‐2 manual, where scores in the 85–90 range are considered “lower part of the average.” However, we recognize that excluding children whose scores were higher than 77.5 but lower than 92.5 could bias the results of the regressions for the two language phenotypes and could also result in a loss of power. We also note, however, that the discovery GWAS did not use controls, but, rather, family‐based case subsets, which means this issue would not have affected the weights used in the construction of the PRS. That said, we examined how defining the above group of children as controls, instead of excluding them, might affect the results: for the narrow language phenotype (SLI), a Nagelkerke's pseudo R\n2 of 4.92% (P = 0.012) was obtained (3.95%, 4.18%; P = 0.023, 0.02, for Corrections 1 and 2, respectively); for the broad language phenotype, a Nagelkerke's pseudo R\n2 of 3.29% (P = 0.024) was obtained (2.69%, 2.73%; P = 0.04, 0.039, for Corrections 1 and 2, respectively). [Correction added on 14 Nov 2019 after first online publication: In the preceding sentence, the text in parentheses following “… (P = 0.012) was obtained” and the text in parentheses following “… (P = 0.024 was obtained” was added. The sentence “In the previous analyses, these were 4.34% and 2.73%, respectively (Table 1),” which followed the preceding sentence, was deleted.] 
    # We also examined how adding a covariate for whether the child is from a VIA7 high risk family (i.e., a family in which at least one parent had a diagnosis of schizophrenia or bipolar disorder) might affect the results (we note that at least 75% of the children in each of the case groups come from a high risk family); while adding a covariate resulted in a higher Nagelkerke's pseudo R\n2 for the models of all neurodevelopmental phenotypes, indicating that the covariate for high risk status explained some of the risk and thus improved the models, the only model in which the PRS was significantly associated with the outcome was the one for the narrow language phenotype (SLI) (P = 0.043); this did not pass the 0.05 threshold for Corrections 1 and 2 (P = 0.068, 0.059, respectively), due to reduced power. [Correction added on 14 Nov 2019 after first online publication: In the preceding sentence, the text “just as before” was deleted following “(P = 0.043)”, and the text that follows “(P = 0.043)” was added.]"
    # print(seg_sens(text))