﻿using System;
using System.Collections.Generic;
using System.Text;

namespace MatchBilingualSent
{
    /**
     * Kinds of tokens produced by the tokenizer.
     */
    enum TokenType { CHINESE, ENGLISH, NUMBER, PUNCTUATION, SPACE };

    /**
     * A single token: its text plus its classified kind.
     */
    struct Token
    {
        public string val;		// token text (full-width punctuation may have been normalized to ASCII)
        public TokenType type;	// kind assigned during tokenization
    };


    public class MultiSplit
    {
        const int NORMAL_TOKENS_IN_BLOCK = 20;	// normal minimum sentence length (in tokens) inside a block
        const int MIN_TOKENS_IN_BLOCK = 10;		// smallest tolerable sentence length inside a block
        const int MAX_SENTS_UNDER_NORMAL = 2;	// tolerated count of consecutive sentences shorter than NORMAL_TOKENS_IN_BLOCK
        const int MIN_SENTS = 2;			// minimum number of sentences that form a block


        const int PAIR_PUNC_TOLARANCE = 6;		// tolerated punctuation count while scanning for a closing pair punctuation
        const int ENG_TOKEN_IN_CHINESE = 4;		// number of English tokens tolerated inside Chinese text


        /**
         * Sets the type for a token holding one non-ASCII character.
         * Full-width (Chinese) punctuation is converted to its ASCII
         * equivalent and typed PUNCTUATION; a full-width ideographic space
         * becomes an ASCII space typed SPACE; anything else is typed CHINESE.
         * @param token the single-character token to classify (modified in place)
         */
        void formatChineseToken(ref Token token)
        {
            // Parallel tables: origin[i] is rewritten to changed[i].
            // Note 《『》』 map to plain [ ] and ‘ ’ map to ` ' so that they
            // pair up with the open/close sets used by isBeginPairPunc()/isEndPairPunc().
            string[] origin = {
    	            "。", "，", "｜", "、", "—",
	            "～", "…", "？", "；", "：",
	            "！", "＃", "＆", "／", "（",
	            "）", "〈", "〉", "《", "『",
	            "』", "“", "”", "‘", "》",
	            "’"
                };
            string[] changed = {
	            ".", ",", "|", ",", "-",
	            "~", "...", "?", ";", ":",
	            "!", "#", "&", "/", "(",
	            ")", "<", ">", "[", "[",
	            "]", "\"", "\"", "`", "]",
	            "\'"
                };

            string cha = token.val;
            if (cha == "　")	// full-width ideographic space
            {
                token.val = " ";
                token.type = TokenType.SPACE;
                return;
            }
            // Fix: search by the table's actual length instead of relying on a
            // hand-maintained SIZE_TO_CHANGE constant that could drift out of
            // sync with the arrays.
            int idx = Array.IndexOf(origin, cha);
            if (idx >= 0)
            {
                token.val = changed[idx];
                token.type = TokenType.PUNCTUATION;
                return;
            }
            token.type = TokenType.CHINESE;
        }

        /**
         * @return true when c is a printable ASCII character (0x20..0x7e).
         */
        bool isprint(char c)
        {
            return c >= 0x20 && c <= 0x7e;
        }

      

        /**
         * Decides whether c acts as a space in English text.
         * ASCII non-printable characters count as spaces; otherwise defer to
         * Utility.isspace.
         */
        bool isEngSpace(char c)
        {
            // Non-printable ASCII is treated as a space separator.
            if (c < 0x80 && !isprint(c))
            {
                return true;
            }
            return Utility.isspace(c);
        }

        /**
         * ASCII-only alphanumeric test (equivalent of C's isalnum restricted
         * to ASCII): digits and upper/lower-case Latin letters.
         */
        bool isalnum(char c)
        {
            return (c >= '0' && c <= '9')
                || (c >= 'A' && c <= 'Z')
                || (c >= 'a' && c <= 'z');
        }

        /**
         * ASCII punctuation test: printable but not alphanumeric.
         * Note that ' ' (0x20) is printable and not alphanumeric, so it also
         * satisfies this predicate; callers check for spaces first.
         */
        bool ispunct(char c)
        {
            return isprint(c) && !isalnum(c);
        }


        /**
         * Characters allowed inside an English word token: letters/digits
         * plus hyphen, apostrophe and dot (hyphenation, contractions,
         * abbreviations).
         */
        bool isEngWordChar(char c)
        {
            switch (c)
            {
                case '-':
                case '\'':
                case '.':
                    return true;
                default:
                    return isalnum(c);
            }
        }


        /**
         * Tokenization: scans the line and appends one Token per unit found.
         * Non-ASCII characters become single-character tokens; runs of spaces
         * become one SPACE token; ASCII punctuation is one token per
         * character; everything else is grouped into ENGLISH/NUMBER words.
         * @param line	the input string
         * @param tokens	output container receiving the tokens
         * @return	total number of tokens in the container
         */
        int tokenize(ref string line, ref List<Token> tokens)
        {
            int pos = 0;
            while (pos < line.Length)
            {
                Token token = new Token();
                int start = pos;
                if (line[pos] >= 0x80)
                {
                    // Non-ASCII: treat as a single Chinese-range character
                    token.val = line.Substring(start, 1);
                    formatChineseToken(ref token);	// classify the Chinese character (and normalize punctuation)
                    pos++;
                }
                else if (isEngSpace(line[pos]))
                {		// collapse a run of spaces into one SPACE token
                    while (pos < line.Length && isEngSpace(line[pos]))
                    {
                        pos++;
                    }
                    token.val = line.Substring(start, pos - start);
                    token.type = TokenType.SPACE;
                }
                else if (ispunct(line[pos]))
                {		// single ASCII punctuation character
                    pos++;
                    token.val = line.Substring(start, pos - start);
                    token.type = TokenType.PUNCTUATION;
                }
                else
                {
                    bool allNum = true;
                    while (pos < line.Length && isEngWordChar(line[pos]))
                    {	// consecutive word characters form one token
                        if (!char.IsDigit(line[pos]) && line[pos] != '.')
                        {
                            allNum = false;
                        }
                        pos++;
                    }
                    // Strip trailing punctuation (e.g. the final '.' of a word),
                    // but always keep at least one character in the token.
                    while (pos > (start + 1) && ispunct(line[pos - 1]))
                    {
                        pos--;
                    }

                    token.val = line.Substring(start, pos - start);
                    // Only digits and dots were seen -> NUMBER, otherwise ENGLISH
                    token.type = allNum ? TokenType.NUMBER : TokenType.ENGLISH;
                }
                tokens.Add(token);
            }

            return tokens.Count;
        }


        /**
         * @return whether token closes the punctuation pair opened by startPunc
         */
        bool isEndPairPunc(ref Token token, SentType sentType, ref string startPunc)
        {
            string close = token.val;
            switch (startPunc)
            {
                case "(":
                    return close == ")";
                case "[":
                    return close == "]";
                case "\"":
                    return close == "\"";
                case "`":
                    return close == "\'";
                default:
                    return false;
            }
        }



        /**
         * @return whether the token opens a punctuation pair
         */
        bool isBeginPairPunc(Token token, SentType sentType)
        {
            switch (token.val)
            {
                case "(":
                case "[":
                case "\"":
                case "`":
                    return true;
                default:
                    return false;
            }
        }

        /**
         * Once a sentence's boundaries are known, append it to the result
         * container, concatenating the text of tokens [sentStart, sentEnd).
         * For English sentences a space is inserted before every punctuation
         * token (and also after opening pair punctuation) to ease later word
         * segmentation.
         * @param tokens	the full token list
         * @param sentStart	index of the first token of the sentence (inclusive)
         * @param sentEnd	index one past the last token (exclusive)
         * @param sentType	sentence classification to record
         * @param sentences	container receiving the finished sentence
         */
        void pushback_sent(ref List<Token> tokens, int sentStart, int sentEnd, SentType sentType, ref List<Sentence> sentences)
        {
            Sentence sent = new Sentence();
            sent.type = sentType;
            sent.tokenNum = sentEnd - sentStart;
            // Fix: build the text with StringBuilder instead of repeated
            // string concatenation (which is O(n^2) in the sentence length).
            StringBuilder text = new StringBuilder();
            for (int i = sentStart; i < sentEnd; i++)
            {
                bool engPunc = sentType == SentType.SENT_ENG && tokens[i].type == TokenType.PUNCTUATION;
                if (engPunc)
                {
                    text.Append(' ');		// space before punctuation, to ease word segmentation
                }
                text.Append(tokens[i].val);
                if (engPunc && isBeginPairPunc(tokens[i], SentType.SENT_ENG))
                {
                    text.Append(' ');		// space after opening pair punctuation, to ease word segmentation
                }
            }
            sent.value = text.ToString();
            sentences.Add(sent);
        }

        /**
         * @return whether the token is sentence-splitting punctuation; the
         *         punctuation itself is kept inside the sentence being split
         */
        bool isSplitPunc(ref Token token, SentType sentType)
        {
            string cha = token.val;
            return cha == "." || cha == "?" || cha == "!";
        }


        /**
         * Counts consecutive ENGLISH/NUMBER tokens starting at position i.
         * SPACE tokens are skipped without counting; any other token type
         * ends the run. Counting also stops once the total exceeds
         * ENG_TOKEN_IN_CHINESE.
         * @param tokens	the token list
         * @param i	start position; on return, the last position examined
         * @return	number of consecutive ENGLISH/NUMBER tokens seen
         */
        int nextEngTokenNumInChi(ref List<Token> tokens, ref int i)
        {
            int seen = 0;
            while (i < tokens.Count && seen <= ENG_TOKEN_IN_CHINESE)
            {
                TokenType kind = tokens[i].type;
                if (kind == TokenType.ENGLISH || kind == TokenType.NUMBER)
                {
                    seen++;
                }
                else if (kind != TokenType.SPACE)
                {
                    break;	// run ends here; i stays on the terminating token
                }
                i++;
            }
            return seen;
        }


        /**
         * Chinese sentence splitter.
         * When the canSplitAt flag is NOT set, a split happens when:
         * 1. An ENGLISH token is found that does not directly follow a CHINESE token.
         * 2. An ENGLISH token directly follows a CHINESE token: scan ahead; if
         *    more than ENG_TOKEN_IN_CHINESE English tokens appear with no
         *    Chinese, split before the English (an English sentence is
         *    starting). I.e. a few English tokens embedded in Chinese are
         *    tolerated, as in "我喜欢FIFA游戏。".
         * 3. An isSplitPunc(SENT_CHI) sentence-ending punctuation is found.
         * 4. A space is found whose preceding token is Chinese or punctuation.
         *
         * The canSplitAt flag is set when:
         * 1. A pair of PairPunc is successfully matched. Note: on
         *    isBeginPairPunct(), scan ahead at most PAIR_PUNC_TOLARANCE
         *    tokens, to guard against an opening punctuation that is never
         *    closed. During that scan only ENG_TOKEN_IN_CHINESE English
         *    tokens are tolerated as well.
         *
         * When canSplitAt IS set, a split happens when:
         * 1. The next token is isBeginPairPunc().
         * 2. The next token is a space.
         *
         * @param tokens	the token list
         * @param tokenIndex	next token to process (advanced by this method)
         * @param sentences	container receiving the split sentences
         */
        void chiSplit(ref List<Token> tokens, ref int tokenIndex, ref List<Sentence> sentences)
        {
            int canSplitAt = -1;	// temporary flag: position where a split may happen
            int sentStart = tokenIndex;
            while (tokenIndex < tokens.Count)
            {
                Token curToken = tokens[tokenIndex++];
                switch (curToken.type)
                {
                    case TokenType.CHINESE:
                        break;
                    case TokenType.ENGLISH:
                        if (tokens[tokenIndex - 2].type == TokenType.CHINESE)
                        {
                            // English embedded inside Chinese text
                            int i = tokenIndex - 1;
                            int engInChi = nextEngTokenNumInChi(ref tokens, ref i);
                            if (engInChi > ENG_TOKEN_IN_CHINESE)
                            {	// more English tokens than tolerated: split before them
                                --tokenIndex;
                                pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_CHI, ref sentences);
                                return;
                            }
                            else
                            {
                                tokenIndex = i;
                            }
                        }
                        else
                        {
                            --tokenIndex;
                            pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_CHI, ref sentences);
                            return;
                        }
                        break;
                    case TokenType.NUMBER:
                        break;
                    case TokenType.PUNCTUATION:
                        if (canSplitAt != -1)
                        {
                            // flag is set and another pair is opening: split at the flag
                            if (isBeginPairPunc(curToken, SentType.SENT_CHI))
                            {
                                tokenIndex = canSplitAt;
                                pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_CHI, ref sentences);
                                return;
                            }
                            canSplitAt = -1;
                        }
                        if (isSplitPunc(ref curToken, SentType.SENT_CHI))
                        {
                            pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_CHI, ref sentences);
                            return;
                        }
                        else if (isBeginPairPunc(curToken, SentType.SENT_CHI))
                        {
                            // scan ahead for the matching closing punctuation
                            int passedSent = 0;
                            for (int i = tokenIndex; i < tokens.Count; i++)
                            {
                                Token t = tokens[i];

                                if (t.type == TokenType.ENGLISH)
                                {
                                    int pos = i;
                                    int engInChi = nextEngTokenNumInChi(ref tokens, ref pos);
                                    if (engInChi > ENG_TOKEN_IN_CHINESE)
                                    {	// more English tokens than tolerated
                                        if (i == tokenIndex)
                                        {
                                            // split at the pair: when the first token inside the
                                            // pair is English, the rest can continue as SENT_ENG
                                            tokenIndex--;
                                        }
                                        else
                                        {
                                            // otherwise it cannot continue as SENT_CHI;
                                            // split at the position inside the pair instead
                                            tokenIndex = i;
                                        }
                                        pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_CHI, ref sentences);
                                        return;
                                    }
                                    else
                                    {
                                        tokenIndex = i;
                                    }
                                }
                                else if ((t.type == TokenType.SPACE && tokens[i - 1].type == TokenType.CHINESE)
                                    || (t.type == TokenType.PUNCTUATION && isSplitPunc(ref t, SentType.SENT_CHI)))
                                {
                                    if (++passedSent > PAIR_PUNC_TOLARANCE)
                                    {
                                        // too many sentence breaks passed: assume the
                                        // closing half is missing and give up the scan
                                        break;
                                    }
                                }
                                else if (isEndPairPunc(ref t, SentType.SENT_CHI, ref curToken.val))
                                {
                                    tokenIndex = i + 1;
                                    canSplitAt = tokenIndex;		// matched a punctuation pair: set the flag
                                    break;
                                }
                            }
                        }
                        break;
                    case TokenType.SPACE:			// TODO(review): should canSplitAt be reset here?
                        if (canSplitAt != -1)
                        {
                            pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_CHI, ref sentences);
                            return;
                        }
                        canSplitAt = -1;
                        // split after a space that follows Chinese text or punctuation
                        if (tokens[tokenIndex - 2].type == TokenType.CHINESE || tokens[tokenIndex - 2].type == TokenType.PUNCTUATION)
                        {
                            pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_CHI, ref sentences);
                            return;
                        }
                        break;
                }
            }
            // reach here, means all tokens are read
            pushback_sent(ref tokens, sentStart, tokens.Count, SentType.SENT_CHI, ref sentences);
        }


        /**
         * English sentence splitter.
         * When the canSplitAt flag is NOT set, a split happens when:
         * 1. A Chinese token is found.
         * 2. An isSplitPunc(SENT_ENG) punctuation is found and the previous
         *    token is not an ENGLISH token starting with a capital letter.
         *
         * The canSplitAt flag is set when:
         * 1. A '.' is found and the previous token is an ENGLISH token
         *    starting with a capital letter (possible abbreviation).
         * 2. A pair of PairPunc is successfully matched. Note: on
         *    isBeginPairPunct(), scan ahead at most PAIR_PUNC_TOLARANCE
         *    tokens, to guard against an opening punctuation that is never
         *    closed.
         *
         * When canSplitAt IS set, a split happens when:
         * 1. The next token is an ENGLISH token starting with a capital
         *    letter: split at canSplitAt.
         * 2. The next token is isBeginPairPunc().
         *
         * @param tokens	the token list
         * @param tokenIndex	next token to process (advanced by this method)
         * @param sentences	container receiving the split sentences
         */
        void engSplit(ref List<Token> tokens, ref int tokenIndex, ref List<Sentence> sentences)
        {
            int canSplitAt = -1;	// temporary flag set where a split may happen
            int sentStart = tokenIndex;
            while (tokenIndex < tokens.Count)
            {
                Token curToken = tokens[tokenIndex++];
                switch (curToken.type)
                {
                    case TokenType.CHINESE:
                        --tokenIndex;
                        pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_ENG, ref sentences);
                        return;
                    case TokenType.ENGLISH:
                        if (canSplitAt != -1)
                        {
                            if (char.IsUpper(curToken.val[0]))
                            {	// starts with a capital letter: split at the flag
                                tokenIndex = canSplitAt;
                                pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_ENG, ref sentences);
                                return;
                            }
                            canSplitAt = -1;
                        }
                        break;
                    case TokenType.NUMBER:
                        break;
                    case TokenType.PUNCTUATION:
                        if (canSplitAt != -1)
                        {
                            // flag is set and another pair is opening: split at the flag
                            if (isBeginPairPunc(curToken, SentType.SENT_ENG))
                            {
                                tokenIndex = canSplitAt;
                                pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_ENG, ref sentences);
                                return;
                            }
                            canSplitAt = -1;
                        }
                        if (isSplitPunc(ref curToken, SentType.SENT_ENG))
                        {
                            if (curToken.val == "." && tokenIndex > 1 && tokens[tokenIndex - 2].type == TokenType.ENGLISH && char.IsUpper(tokens[tokenIndex - 2].val[0]))
                            {
                                // the previous word starts with a capital letter: only set
                                // a flag, since this may be an abbreviation such as
                                // Ltd., Mr., HT., etc.
                                canSplitAt = tokenIndex + 1;
                            }
                            else
                            {
                                pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_ENG, ref sentences);
                                return;
                            }
                        }
                        else if (isBeginPairPunc(curToken, SentType.SENT_ENG))
                        {
                            // scan ahead for the matching closing punctuation
                            int passedSent = 0;
                            for (int i = tokenIndex; i < tokens.Count; i++)
                            {
                                Token t = tokens[i];
                                if (t.type == TokenType.CHINESE)
                                {
                                    if (i == tokenIndex)
                                    {
                                        // the first token inside the pair is Chinese: split at
                                        // the pair, so the rest can continue as SENT_CHI
                                        tokenIndex--;
                                    }
                                    else
                                    {
                                        // cannot continue as SENT_CHI; split at the Chinese
                                        // token. Splitting at the pair would loop forever,
                                        // e.g. "He says [China 中国]"
                                        tokenIndex = i;
                                    }
                                    pushback_sent(ref tokens, sentStart, tokenIndex, SentType.SENT_ENG, ref sentences);
                                    return;
                                }
                                else if (t.type == TokenType.PUNCTUATION && isSplitPunc(ref t, SentType.SENT_ENG)
                                        && ++passedSent > PAIR_PUNC_TOLARANCE)
                                {
                                    // passed too many sentence breaks: assume the closing
                                    // half is missing; abort the scan and ignore this
                                    // isBeginPairPunc()
                                    break;
                                }
                                else if (isEndPairPunc(ref t, SentType.SENT_ENG, ref curToken.val))
                                {
                                    // found the closing half
                                    tokenIndex = i + 1;
                                    canSplitAt = tokenIndex;		// matched a punctuation pair: set the flag
                                    break;
                                }
                            }
                        }
                        break;
                    case TokenType.SPACE:
                        break;
                }
            }
            // reach here, means all tokens are read
            pushback_sent(ref tokens, sentStart, tokens.Count, SentType.SENT_ENG, ref sentences);
        }



        /**
         * Sentence-splitting interface function.
         * Tokenizes the line, then repeatedly dispatches the pending tokens
         * to the Chinese or English splitter based on the first CHINESE or
         * ENGLISH token encountered after the pending sentence start.
         * @param line	the string to split into sentences
         * @return	container holding the resulting sentences
         */
         List<Sentence> sentsplit(ref string line)
        {
            List<Sentence> sentences = new List<Sentence>();
            List<Token> tokens = new List<Token>();			// token storage
            tokenize(ref line, ref tokens);		// tokenization comes first

            int tokenIndex = 0;			// next token to process
            int sentStart = -1;			// first token of the pending sentence (-1: none pending)
            while (tokenIndex < tokens.Count)
            {
                if (sentStart == -1)
                {
                    sentStart = tokenIndex;
                }
                Token curToken = tokens[tokenIndex++];

                switch (curToken.type)
                {
                    case TokenType.CHINESE:
                        tokenIndex = sentStart;
                        // Chinese split; chiSplit advances tokenIndex internally
                        chiSplit(ref tokens, ref tokenIndex, ref sentences);
                        sentStart = -1;
                        break;
                    case TokenType.ENGLISH:
                        tokenIndex = sentStart;
                        // English split; engSplit advances tokenIndex internally
                        engSplit(ref tokens, ref tokenIndex, ref sentences);
                        sentStart = -1;
                        break;
                    case TokenType.NUMBER:
                        break;
                    case TokenType.PUNCTUATION:		// sentence not started yet: keep scanning
                        break;
                    case TokenType.SPACE:			// sentence not started yet: ignore a leading space
                        if (sentStart + 1 == tokenIndex)
                            sentStart = -1;
                        break;
                }
            }

            return sentences;
        }



        /**
         * Finds consecutive runs ("blocks") of sentences of the given type
         * and marks each run's first sentence in beginMarks and its last in
         * endMarks. A sentence extends the current run when its type matches
         * and it is long enough: at least NORMAL_TOKENS_IN_BLOCK tokens, or
         * at least MIN_TOKENS_IN_BLOCK tokens while the run has already
         * started and fewer than MAX_SENTS_UNDER_NORMAL consecutive short
         * sentences have been accepted. A run must contain at least MIN_SENTS
         * sentences to be recorded as a block.
         * @return true if at least one block was found
         */
        bool findBlock(ref List<Sentence> sentences, SentType sentType, ref bool[] beginMarks, ref bool[] endMarks)
        {
            int sentBegin = -1;		// index of the first sentence of the current run
            int sentNum = 0;		// sentences accepted into the current run
            int sentUnderNormal = 0;	// consecutive accepted sentences shorter than NORMAL_TOKENS_IN_BLOCK
            bool found = false;
            int i = 0;

            for (i = 0; i < sentences.Count; i++)
            {
                Sentence s = sentences[i];
                if (s.type == sentType && (s.tokenNum >= NORMAL_TOKENS_IN_BLOCK || (s.tokenNum >= MIN_TOKENS_IN_BLOCK && sentNum > 0 && sentUnderNormal < MAX_SENTS_UNDER_NORMAL)))
                {
                    if (sentNum == 0)
                    {
                        sentBegin = i;
                    }
                    sentNum++;

                    if (s.tokenNum >= NORMAL_TOKENS_IN_BLOCK)
                    {
                        sentUnderNormal = 0;
                    }
                    else
                    {
                        sentUnderNormal++;
                    }
                }
                else
                {
                    // run broken: record it as a block if it was long enough
                    if (sentNum >= MIN_SENTS)
                    {
                        beginMarks[sentBegin] = true;
                        endMarks[i - 1] = true;
                        found = true;
                    }
                    sentNum = 0;
                    sentUnderNormal = 0;
                }
            }
            // flush a run that extends to the end of the sentence list
            if (sentNum >= MIN_SENTS)
            {
                beginMarks[sentBegin] = true;
                endMarks[sentences.Count - 1] = true;
                found = true;
            }
            return found;
        }


        /**
         * Converts the sentence list into output tokens. Sentences with a
         * known type and more than 3 tokens are emitted; when both English
         * and Chinese blocks exist, block begin (B_TYPE) and end (L_TYPE)
         * marker tokens are inserted around each block.
         * @param sentences	the classified sentences
         * @return	the resulting output token list
         */
        List<SentenceToken> output(ref List<Sentence> sentences)
        {
            // Fix: C# bool arrays are zero-initialized (all false), so the
            // original explicit clearing loop was redundant and is removed.
            bool[] beginMarks = new bool[sentences.Count];
            bool[] endMarks = new bool[sentences.Count];

            List<SentenceToken> stList = new List<SentenceToken>();

            bool engBlock = findBlock(ref sentences, SentType.SENT_ENG, ref beginMarks, ref endMarks);
            bool chiBlock = findBlock(ref sentences, SentType.SENT_CHI, ref beginMarks, ref endMarks);
            bool showBlock = (engBlock && chiBlock);		// with only one kind of block there is nothing to mark out

            for (int i = 0; i < sentences.Count; i++)
            {
                if (showBlock && beginMarks[i])
                {
                    // insert flag token marking the start of block text
                    SentenceToken begin = new SentenceToken();
                    begin.type = OutputType.B_TYPE;
                    begin.strSent = "";
                    stList.Add(begin);
                }

                Sentence s = sentences[i];
                if (s.type != SentType.SENT_UNKN && s.tokenNum > 3)
                {
                    SentenceToken st = new SentenceToken();
                    // Fix: s.type cannot be SENT_UNKN inside this branch, so the
                    // original unreachable U_TYPE case is removed; anything that
                    // is not SENT_ENG maps to C_TYPE, as before.
                    st.type = (s.type == SentType.SENT_ENG) ? OutputType.E_TYPE : OutputType.C_TYPE;
                    st.strSent = s.value;
                    stList.Add(st);
                }

                if (showBlock && endMarks[i])
                {
                    // insert flag token marking the end of block text
                    SentenceToken end = new SentenceToken();
                    end.type = OutputType.L_TYPE;
                    end.strSent = "";
                    stList.Add(end);
                }
            }

            return stList;
        }


        /**
         * Public interface function.
         * Performs sentence splitting and paragraph blocking in one pass.
         * @param strCont	the raw text to process
         * @return	output tokens, including block begin/end marker tokens
         */
        public List<SentenceToken> DoMultiSplit(string strCont)
        {
            // Fix: the original allocated a List<Sentence> that was immediately
            // overwritten by sentsplit's return value; the dead allocation is removed.
            List<Sentence> sentences = sentsplit(ref strCont);
            return output(ref sentences);
        }

    }
}
