﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using CRFSharp;
using CRFSharpWrapper;
using AdvUtils;

namespace WordSeg
{
    public class Token
    {
        // One segmentation result: the span [offset, offset + len) of the input
        // text, its surface string, and any tags (e.g. named-entity labels)
        // attached to it.
        public int offset;              // start position in the source string
        public int len;                 // span length in characters
        public string strTerm;          // surface string of the span
        public List<string> strTagList; // tags attached to this span

        public Token()
        {
            this.offset = 0;
            this.len = 0;
            this.strTerm = "";
            this.strTagList = new List<string>();
        }

        // True when the given tag is already attached to this token.
        public bool ExistTag(string strTag)
        {
            return strTagList.IndexOf(strTag) >= 0;
        }
    }

    public class Tokens
    {
        //intermediate data
        public crf_seg_out[] crf_seg_out;
        public SegDecoderTagger crf_tag;
        public List<Token> modelTokenList;
        public List<List<string>> inbuf; // feature set for CRF decoder
        public List<Lemma> dm_r;
        public List<int> dm_offsetList;

        //final result
        public List<Token> tokenList;

        //dictionary-granularity (fine-grain) segmentation result
        public List<Token> subTokenList;

        public Tokens()
        {
            crf_seg_out = null;
            crf_tag = null;
            inbuf = null;

            modelTokenList = new List<Token>();
            dm_r = new List<Lemma>();
            dm_offsetList = new List<int>();
            tokenList = new List<Token>();
            subTokenList = new List<Token>();
        }

        public Tokens(CRFSharpWrapper.Decoder crf)
            : this()
        {
            if (crf == null)
            {
                return;
            }

            //Initialize the data structures used by the CRF model
            crf_seg_out = new CRFSharpWrapper.crf_seg_out[1];
            crf_seg_out[0] = new CRFSharpWrapper.crf_seg_out();
            crf_tag = crf.CreateTagger();
            if (crf_tag != null)
            {
                crf_tag.set_nbest(1);
                crf_tag.set_vlevel(0);
            }
        }


        public void Clear()
        {
            //Note: crf_seg_out/crf_tag are kept so this instance can be reused
            //across segmentation calls.
            dm_r.Clear();
            dm_offsetList.Clear();

            inbuf = null;
            modelTokenList.Clear();
            tokenList.Clear();
            subTokenList.Clear();
        }

    }

    public class WordSeg
    {
        DictMatch dictmatch;               // lexical dictionary matcher
        CRFSharpWrapper.Decoder crf;       // CRF model decoder
        IGenerateFeature featureGenerator; // optional custom feature generator; null => default 1-dim character features

        public WordSeg()
        {
            // The decoder and the dictionary matcher are always constructed;
            // actual dictionaries/models are loaded later via the Load* methods.
            crf = new CRFSharpWrapper.Decoder();
            dictmatch = new DictMatch();
        }
        

        //加载词汇字典
        //Load the lexical dictionary, either from raw text (default) or binary format.
        public void LoadLexicalDict(string strLexicalDict, bool bLoadRawTextFormat = true)
        {
            if (bLoadRawTextFormat)
            {
                dictmatch.LoadDictFromRawText(strLexicalDict);
                return;
            }

            dictmatch.LoadDictFromBinary(strLexicalDict);
        }

        //加载CRF统计模型
        //Load the CRF statistical model; an optional feature generator replaces the default one.
        //Returns true when the model file was loaded successfully.
        public bool LoadModelFile(string strModelFileName, IGenerateFeature feaGen = null)
        {
            this.featureGenerator = feaGen;
            bool bLoaded = crf.LoadModel(strModelFileName);
            return bLoaded;
        }

        //加载字典与模型，创建全局数据结构
        //Load both the lexical dictionary and the CRF model in one call.
        //Returns the model-loading result (dictionary loading reports no status).
        public bool LoadAllDictModel(string strDictMatch, string strModelFileName,
            bool bLexicalDictRawText = true, IGenerateFeature feaGen = null)
        {
            //lexical dictionary first
            LoadLexicalDict(strDictMatch, bLexicalDictRawText);

            //then the statistical model
            bool bOk = LoadModelFile(strModelFileName, feaGen);
            return bOk;
        }

        //创建线程相关的数据结构
        //Create the per-thread working data structures bound to the shared CRF decoder.
        public Tokens CreateTokens()
        {
            return new Tokens(crf);
        }

        //切分主流程
        //1. 使用字典切分基本片段
        //2. 使用CRF模型进行语义片段切分，并且对特定类别的专名进行标注
        //3. 对1和2的结果进行整合
        //默认不使用CRF模型进行统计切分
        //Main segmentation pipeline. With the CRF model: dictionary match + CRF
        //decode, then merge both; without it: plain forward-maximum-match (FMM)
        //dictionary segmentation. Always finishes with rule-based merging of
        //contiguous ASCII-letter / digit runs. Returns 0.
        public int Segment(string strText, Tokens tokens, bool bUseCRFModel = false)
        {
            tokens.Clear();
            int ret; //NOTE(review): assigned below but never checked; sub-steps currently always return 0
            if (bUseCRFModel == true)
            {
                ret = Segment_DictMatch(strText, tokens);   //collect all dictionary matches
                ret = Segment_Model(strText, tokens);       //CRF segmentation + NE tagging
                ret = MergeLexicalDictAndCRFResult(tokens); //reconcile both into tokenList
            }
            else
            {
                Segment_DictMatch_FMM(strText, tokens);
                //aliases the same list object as tokenList at this point; after
                //MergeTerms replaces tokenList below, subTokenList keeps the
                //pre-merge fine-grain tokens — presumably intentional, verify
                tokens.subTokenList = tokens.tokenList;
            }

            //merge contigious characters based on rules and update token list
            FillStringSegList(strText, tokens.tokenList);
            tokens.tokenList = MergeTerms(tokens.tokenList);

            if (bUseCRFModel == true)
            {
                //generate fine-grain result
                tokens.subTokenList = SegmentSubTokenByDM(tokens.tokenList);
            }

            return 0;
        }

        //Dictionary-only segmentation using forward maximum matching (FMM).
        //Characters not covered by any dictionary entry become single-character tokens.
        private void Segment_DictMatch_FMM(string strText, Tokens tokens)
        {
            dictmatch.Search(strText, ref tokens.dm_r, ref tokens.dm_offsetList, DictMatch.DM_OUT_FMM);

            int nextOffset = 0;
            for (int i = 0; i < tokens.dm_r.Count; i++)
            {
                int matchOffset = tokens.dm_offsetList[i];

                //fill the gap before this match with single-character tokens
                for (; nextOffset < matchOffset; nextOffset++)
                {
                    Token filler = new Token();
                    filler.offset = nextOffset;
                    filler.len = 1;

                    tokens.tokenList.Add(filler);
                }

                //emit the matched dictionary term
                Token matched = new Token();
                matched.offset = matchOffset;
                matched.len = (int)tokens.dm_r[i].len;

                string prop = tokens.dm_r[i].strProp;
                if (prop != null && prop.Length > 0)
                {
                    matched.strTagList.Add(prop);
                }

                tokens.tokenList.Add(matched);
                nextOffset = matched.offset + matched.len;
            }

            //trailing characters beyond the last dictionary match
            for (; nextOffset < strText.Length; nextOffset++)
            {
                Token filler = new Token();
                filler.offset = nextOffset;
                filler.len = 1;

                tokens.tokenList.Add(filler);
            }
        }

        //Resolve each token's surface string from its (offset, len) span of the input text.
        private static void FillStringSegList(string strText, List<Token> tokenList)
        {
            for (int i = 0; i < tokenList.Count; i++)
            {
                Token token = tokenList[i];
                token.strTerm = strText.Substring(token.offset, token.len);
            }
        }

        //Re-segment each coarse token at dictionary granularity (FMM) to produce
        //the fine-grain sub-token list. Sub-token offsets are re-based onto the
        //original text; each sub-token inherits its parent's tags.
        private List<Token> SegmentSubTokenByDM(List<Token> tokenList)
        {
            List<Token> subTokenList = new List<Token>();
            for (int i = 0; i < tokenList.Count; i++)
            {
                //FMM-segment this token's surface string in a scratch Tokens instance
                Tokens tmpTokens = new Tokens();
                Segment_DictMatch_FMM(tokenList[i].strTerm, tmpTokens);

                int offset = tokenList[i].offset;
                foreach (Token item in tmpTokens.tokenList)
                {
                    Token token = new Token();
                    token.offset = offset;
                    token.len = item.len;
                    //BUGFIX: copy the parent's tag list instead of sharing the same
                    //List<string> instance between the parent and every sub-token;
                    //with the shared reference, a later Add on any one of them
                    //silently showed up on all of them.
                    token.strTagList = new List<string>(tokenList[i].strTagList);
                    token.strTerm = tokenList[i].strTerm.Substring(item.offset, item.len);

                    subTokenList.Add(token);

                    offset += item.len;
                }
            }

            return subTokenList;
        }

        //How a run of adjacent untagged tokens may be merged by MergeTerms.
        enum MergeType
        {
            MERGE_ALL_ASCII, // run of ASCII letters (e.g. an English word)
            MERGE_ALL_DIGIT, // run of decimal digits (e.g. a number)
            MERGE_NONE       // token must not be merged
        };

        //merge contigious characters based on rules and update token list
        //Collapses adjacent untagged tokens that are all ASCII letters (or all
        //digits) into one token, e.g. "w" "o" "r" "d" -> "word". Tagged tokens
        //and non-mergeable tokens pass through unchanged and terminate any run.
        //Returns a new list; the input list is not modified.
        //Cleanup: removed a stray empty statement and "== true" comparisons; the
        //nested flush-on-type-change conditions are collapsed into one. Logic is
        //otherwise unchanged.
        private List<Token> MergeTerms(List<Token> tokenList)
        {
            List<Token> rstTokenList = new List<Token>();
            Token mergingToken = null;           //run currently being accumulated
            MergeType mt = MergeType.MERGE_NONE; //merge type of that run
            for (int i = 0; i < tokenList.Count; i++)
            {
                //classify the current token
                MergeType cmt;
                if (IsAllLetters(tokenList[i].strTerm))
                {
                    cmt = MergeType.MERGE_ALL_ASCII;
                }
                else if (IsAllDigit(tokenList[i].strTerm))
                {
                    cmt = MergeType.MERGE_ALL_DIGIT;
                }
                else
                {
                    cmt = MergeType.MERGE_NONE;
                }

                if (tokenList[i].strTagList.Count == 0 && cmt != MergeType.MERGE_NONE)
                {
                    if (cmt != mt && mergingToken != null)
                    {
                        //merge type changed: flush the current run and start a new one
                        rstTokenList.Add(mergingToken);
                        mergingToken = null;
                    }

                    if (mergingToken == null)
                    {
                        //start a new run from this token
                        mergingToken = new Token();
                        mergingToken.offset = tokenList[i].offset;
                        mergingToken.len = tokenList[i].len;
                        mergingToken.strTerm = tokenList[i].strTerm;
                        mt = cmt;
                    }
                    else
                    {
                        //extend the current run
                        mergingToken.len += tokenList[i].len;
                        mergingToken.strTerm += tokenList[i].strTerm;
                    }
                }
                else
                {
                    //tagged or non-mergeable token: flush any pending run, then
                    //pass the token through unchanged
                    if (mergingToken != null)
                    {
                        rstTokenList.Add(mergingToken);
                        mergingToken = null;
                    }
                    rstTokenList.Add(tokenList[i]);
                }
            }

            //flush the trailing run, if any
            if (mergingToken != null)
            {
                rstTokenList.Add(mergingToken);
            }
            return rstTokenList;
        }

        //True when every character of str is an ASCII letter (a-z / A-Z).
        //An empty string returns true, matching the original behavior.
        //FIX: the previous implementation lower-cased the whole string first,
        //which allocated a new string and was culture-sensitive (e.g. the
        //Turkish dotless-I mapping could misclassify 'I'); the explicit range
        //check is allocation-free and culture-invariant. Made static because
        //the method uses no instance state (CA1822).
        private static bool IsAllLetters(string str)
        {
            foreach (char ch in str)
            {
                if ((ch < 'a' || ch > 'z') && (ch < 'A' || ch > 'Z'))
                {
                    return false;
                }
            }

            return true;
        }

        //True when every character of str is an ASCII decimal digit (0-9).
        //An empty string returns true, matching the original behavior.
        //FIX: removed the ToLower() call, which allocated a new string and is a
        //no-op for a digit check. Made static because the method uses no
        //instance state (CA1822).
        private static bool IsAllDigit(string str)
        {
            foreach (char ch in str)
            {
                if (ch < '0' || ch > '9')
                {
                    return false;
                }
            }

            return true;
        }



        //基于字典的切分流程
        //Dictionary-based segmentation: collects every dictionary match at all
        //granularities (DM_OUT_ALL) into tokens.dm_r / tokens.dm_offsetList for
        //later merging with the CRF result. Always returns 0.
        private int Segment_DictMatch(string str, Tokens tokens)
        {
            dictmatch.Search(str, ref tokens.dm_r, ref tokens.dm_offsetList, DictMatch.DM_OUT_ALL);
            return 0;
        }

        //使用CRF模型进行语义片段切分以及特定类别的命名实体标注
        //CRF-model segmentation: builds the feature set, runs the decoder, and
        //converts the best decode into Token instances in tokens.modelTokenList.
        //Always returns 0.
        private int Segment_Model(string str, Tokens tokens)
        {
            if (string.IsNullOrEmpty(str))
            {
                return 0;
            }

            //Generate the input string's feature set for the CRF model.
            //FIX: assign directly instead of pre-allocating a List that was
            //immediately discarded by both branches.
            if (featureGenerator == null)
            {
                //If no specific feature generator, using the default one
                tokens.inbuf = BuildDefaultFeatureSet(str);
            }
            else
            {
                tokens.inbuf = featureGenerator.GenerateFeature(str);
            }

            //Break and tag string by CRF model
            //(return code dropped: it was previously stored but never checked)
            crf.Segment(tokens.crf_seg_out, tokens.crf_tag, tokens.inbuf);

            crf_seg_out item = tokens.crf_seg_out[0];
            for (int j = 0; j < item.Count; j++)
            {
                int offset = item.tokenList[j].offset;
                int len = item.tokenList[j].length;
                string strNE = item.tokenList[j].strTag;

                //Create word breaker token instance
                Token token = new Token();
                token.offset = offset;
                token.len = len;
                //null-safe: a missing tag no longer risks a NullReferenceException
                if (string.IsNullOrEmpty(strNE) == false)
                {
                    token.strTagList.Add(strNE);
                }

                tokens.modelTokenList.Add(token);
            }

            return 0;
        }

        //Default CRF feature set: one column per input character, each column
        //holding only that character (1-dim character feature).
        private static List<List<string>> BuildDefaultFeatureSet(string str)
        {
            List<List<string>> sinbuf = new List<List<string>>();
            foreach (char ch in str)
            {
                sinbuf.Add(new List<string> { ch.ToString() });
            }

            return sinbuf;
        }
        

        //Returns the longest break-candidate length in `list` that does not
        //exceed `lmt`, or 0 when no candidate fits. `list` maps candidate
        //length -> dictionary property tag; only the keys matter here, so we
        //iterate Keys directly. Made static: no instance state is used (CA1822).
        private static int GetLongestDictBreakResult(Dictionary<int, string> list, int lmt)
        {
            int max = 0;
            foreach (int len in list.Keys)
            {
                if (len > max && len <= lmt)
                {
                    max = len;
                }
            }

            return max;
        }

        //Merge lexical term match and CRF model result
        //Merge lexical term match and CRF model result.
        //CRF-tagged tokens go straight into the result (the dictionary may only
        //contribute an extra tag, never re-split them). Untagged tokens whose
        //span exactly matches a dictionary entry are kept as-is; all other
        //untagged tokens are re-split at dictionary granularity with forward
        //maximum matching. Always returns 0.
        //Cleanup: TryGetValue replaces the repeated ContainsKey + indexer double
        //lookups, and the duplicated exact-match/tag logic is extracted into
        //AddDictTagOnExactMatch.
        private int MergeLexicalDictAndCRFResult(Tokens tokens)
        {
            //Index all dictionary matches: offset -> (length -> property tag)
            Dictionary<int, Dictionary<int, string>> dm_off2len2ne = new Dictionary<int, Dictionary<int, string>>();
            for (int j = 0; j < tokens.dm_r.Count; j++)
            {
                int len = (int)tokens.dm_r[j].len;
                int offset = tokens.dm_offsetList[j];

                Dictionary<int, string> len2ne;
                if (dm_off2len2ne.TryGetValue(offset, out len2ne) == false)
                {
                    len2ne = new Dictionary<int, string>();
                    dm_off2len2ne.Add(offset, len2ne);
                }
                //indexer assignment instead of Add(): a duplicated (offset, len)
                //pair from the matcher no longer throws ArgumentException
                len2ne[len] = tokens.dm_r[j].strProp;
            }

            foreach (Token token in tokens.modelTokenList)
            {
                //tokens the model tagged as named entities are exempt from the
                //granularity check and go straight into the result set
                if (token.strTagList.Count > 0)
                {
                    AddDictTagOnExactMatch(dm_off2len2ne, token);
                    tokens.tokenList.Add(token);
                    continue;
                }

                //dictionary and model agree on (offset, len): no conflict
                if (AddDictTagOnExactMatch(dm_off2len2ne, token))
                {
                    tokens.tokenList.Add(token);
                    continue;
                }

                //granularities disagree: re-split this token's span against the
                //dictionary index using forward maximum matching
                int bpos = token.offset;
                int epos = token.offset + token.len;
                while (bpos < epos)
                {
                    int newlen = 0;
                    Dictionary<int, string> len2ne;
                    if (dm_off2len2ne.TryGetValue(bpos, out len2ne))
                    {
                        //longest break candidate starting at bpos that fits in the span
                        newlen = GetLongestDictBreakResult(len2ne, epos - bpos);
                    }

                    //no matching term in the lexical dictionary: emit a single character
                    if (newlen == 0)
                    {
                        newlen = 1;
                    }

                    Token newToken = new Token();
                    newToken.offset = bpos;
                    newToken.len = newlen;

                    string strProp;
                    if (len2ne != null &&
                        len2ne.TryGetValue(newlen, out strProp) &&
                        strProp != null && strProp.Length > 0)
                    {
                        newToken.strTagList.Add(strProp);
                    }

                    tokens.tokenList.Add(newToken);

                    bpos += newlen;
                }
            }

            return 0;
        }

        //Adds the dictionary's property tag to the token when the index holds an
        //entry whose offset and length exactly match the token's span. Returns
        //true when such an exact span exists, even if it carries no tag.
        private static bool AddDictTagOnExactMatch(Dictionary<int, Dictionary<int, string>> dm_off2len2ne, Token token)
        {
            Dictionary<int, string> len2ne;
            if (dm_off2len2ne.TryGetValue(token.offset, out len2ne) == false)
            {
                return false;
            }

            string strProp;
            if (len2ne.TryGetValue(token.len, out strProp) == false)
            {
                return false;
            }

            if (strProp != null && strProp.Length > 0)
            {
                token.strTagList.Add(strProp);
            }

            return true;
        }
    }
}
