package com.coccoc;

import cn.hutool.extra.pinyin.PinyinUtil;

import java.util.*;

/**
 * Rewrite class com.coccoc.Tokenizer for Elasticsearch integration.
 *
 * @author duydo, CocCoc team
 */
public class Tokenizer {

    public static final String TOKENIZER_SHARED_LIB_NAME = "coccoc_tokenizer_jni";

    static {
        // Loads libcoccoc_tokenizer_jni from java.library.path; class loading
        // fails with UnsatisfiedLinkError if the shared library is missing.
        System.loadLibrary(TOKENIZER_SHARED_LIB_NAME);
    }

    /** Tokenization modes understood by the native CocCoc library. */
    public enum TokenizeOption {
        NORMAL(0), HOST(1), URL(2);

        private final int value;

        TokenizeOption(int value) {
            this.value = value;
        }

        /** @return the integer code passed down to the native layer */
        public int value() {
            return value;
        }
    }

    public static final String SPACE = " ";
    public static final String UNDERSCORE = "_";
    public static final String COMMA = ",";
    public static final String DOT = ".";

    // Dictionary path consumed by the lazy holder below. volatile so the value
    // written in getInstance() is visible to whichever thread first triggers
    // Loader's class initialization.
    private static volatile String dictPath = null;

    // Initialization-on-demand holder idiom: INSTANCE is created exactly once,
    // on first reference to Loader, using the dictPath set at that moment.
    private static final class Loader {

        private static final Tokenizer INSTANCE = get();

        private Loader() {
        }

        private static Tokenizer get() {
            return new Tokenizer(dictPath);
        }
    }

    /**
     * Returns the process-wide Tokenizer singleton.
     *
     * <p>NOTE: the dictionary path is honored only on the very first call;
     * subsequent calls with a different path silently return the instance
     * that was built from the first path.
     *
     * @param dictPath path to the tokenizer dictionary directory
     * @return the shared Tokenizer instance
     */
    public static Tokenizer getInstance(String dictPath) {
        Tokenizer.dictPath = dictPath;
        return Loader.INSTANCE;
    }

    private Tokenizer(String dictPath) {
        int status = initialize(dictPath);
        if (status < 0) {
            // Include the native status code so failures are diagnosable.
            throw new RuntimeException(
                    String.format("Cannot initialize Tokenizer (status=%d): %s", status, dictPath));
        }
    }

    /**
     * Segments {@code text} with the native CocCoc tokenizer and augments the
     * result with per-character and bigram tokens for CJK text.
     *
     * @param text            input text; must not be null
     * @param option          native tokenization mode
     * @param keepPunctuation whether the native tokenizer keeps punctuation
     * @return de-duplicated tokens sorted by {@link Token}'s natural order
     * @throws IllegalArgumentException if {@code text} is null
     * @throws RuntimeException         if the native call fails
     */
    public List<Token> segment(String text, TokenizeOption option, boolean keepPunctuation) {
        if (text == null) {
            throw new IllegalArgumentException("text is null");
        }
        long resPointer = segmentPointer(text, false, option.value(), keepPunctuation);
        if (resPointer < 0) {
            throw new RuntimeException("Cannot segment the text");
        }

        final Set<Token> tokens = new HashSet<>();
        try {
            parseNativeTokens(text, resPointer, tokens);
        } finally {
            // FIX: always release the native result buffer, even if parsing
            // throws — previously an exception here leaked native memory.
            freeMemory(resPointer);
        }
        try {
            if (!text.isEmpty()) {
                if (PinyinUtil.isChinese(text.charAt(0))) {
                    oneWordTokens(text, tokens);
                } else {
                    moreWordTokens(text, tokens);
                }
            }
            addCjkCharTokens(text, tokens);
        } catch (Exception e) {
            // Best-effort CJK augmentation: the native tokens are still
            // returned if this extra pass fails. TODO(review): route through a
            // logger instead of stderr.
            e.printStackTrace();
        }
        List<Token> sorted = new ArrayList<>(tokens);
        Collections.sort(sorted);
        return sorted;
    }

    /**
     * Reads the native result struct and adds one Token per range.
     * Field offsets mirror {@code struct Token} in tokenizer.hpp and the JNI
     * .cpp implementation.
     */
    private void parseNativeTokens(String text, long resPointer, Set<Token> tokens) {
        int rangesSize = Unsafe.getInt(resPointer + 8 * 2);
        long rangesDataPointer = Unsafe.getLong(resPointer + 8 * 3);
        final int tokenSize = 4 * 6;
        for (int i = 0; i < rangesSize; ++i) {
            long base = rangesDataPointer + (long) i * tokenSize;
            int originalStartPos = Unsafe.getInt(base + 8);
            int originalEndPos = Unsafe.getInt(base + 12);
            int type = Unsafe.getInt(base + 16);
            int segType = Unsafe.getInt(base + 20);

            // Rebuild the surface form from the original text.
            // NOTE(review): positions are used as char indices; supplementary
            // (non-BMP) characters would break this — confirm the JNI side
            // reports UTF-16 code-unit offsets, not codepoint offsets.
            final StringBuilder sb = new StringBuilder();
            for (int j = originalStartPos; j < originalEndPos; ++j) {
                sb.appendCodePoint(text.charAt(j));
            }
            // segType == 1 appears to mark numeric tokens: normalize decimal comma.
            String surface = segType == 1 ? sb.toString().replace(COMMA, DOT) : sb.toString();
            if (!surface.isEmpty()) {
                tokens.add(new Token(surface.trim(), Token.Type.fromInt(type),
                        Token.SegType.fromInt(segType), originalStartPos, originalEndPos));
            }
        }
    }

    /**
     * Adds single-character tokens for Japanese kana, Korean hangul and
     * Chinese characters, plus a bigram token for each adjacent Chinese pair.
     */
    private void addCjkCharTokens(String text, Set<Token> tokens) {
        for (int i = 0; i < text.length(); i++) {
            char c = text.charAt(i);
            if (isRi(c) || isHan(c)) {
                tokens.add(new Token(String.valueOf(c), Token.Type.WORD, Token.SegType.OTHER_SEG_TYPE, i, i + 1));
            } else if (PinyinUtil.isChinese(c)) {
                if (i + 1 < text.length()) {
                    char next = text.charAt(i + 1);
                    if (PinyinUtil.isChinese(next)) {
                        // FIX: the bigram spans two chars, so its end offset is
                        // i + 2 (the original emitted i + 1).
                        tokens.add(new Token(c + "" + next, Token.Type.WORD, Token.SegType.OTHER_SEG_TYPE, i, i + 2));
                    }
                }
                tokens.add(new Token(String.valueOf(c), Token.Type.WORD, Token.SegType.OTHER_SEG_TYPE, i, i + 1));
            }
        }
    }

    /**
     * @param c character to test
     * @return true if {@code c} is Japanese hiragana/katakana (U+3040–U+30FF)
     */
    public static boolean isRi(char c) {
        // Equivalent to the old regex match against [\u3040-\u30FF], without
        // compiling a Pattern on every call.
        return c >= 0x3040 && c <= 0x30FF;
    }

    /**
     * @param ch character to test
     * @return true for Korean hangul: compatibility jamo (U+3131–U+318E
     *     exclusive bounds as coded) or syllables (U+AC00–U+D7A3).
     *     NOTE(review): despite the name, this matches Korean, not Han.
     */
    public static boolean isHan(char ch) {
        return (ch > 0x3130 && ch < 0x318F) || (ch >= 0xAC00 && ch <= 0xD7A3);
    }

    // Example inputs this heuristic targets (song titles mixing scripts):
    //   Hac Nguyet Quang / 黑月光 (Truong Nguyet Tan Minh OST)
    //   You Are Missing From Me / 好想再见
    //   好想 (排箫)
    //   撒野 kaiser meow  TODO
    //   may Lang Thang TaynguyenSound,Tung TeA,PC,New oulz  ,， segmentation TODO
    /**
     * Emits unigram/bigram/trigram character tokens for text that starts with
     * a Chinese character, after stripping bracketed Latin suffixes and
     * punctuation.
     *
     * <p>NOTE(review): the emitted positions index into the CLEANED text, not
     * the caller's original string — confirm downstream consumers expect that.
     */
    private void oneWordTokens(String text, Set<Token> tokens) {
        // If a "(...)" suffix starts with a non-Chinese character, drop it
        // (e.g. a Latin-script subtitle or instrument annotation).
        if (text.contains("(")) {
            String[] split = text.split("\\(");
            if (split.length >= 2) {
                String tail = split[1].trim();
                if (tail.length() > 1 && !PinyinUtil.isChinese(tail.charAt(0))) {
                    text = split[0].trim();
                }
            }
        }
        // Strip separators; all patterns are literal, so plain replace() works
        // (the original used regex replaceAll with escaped metacharacters).
        text = text.replace(" (", "");
        text = text.replace("(", "");
        text = text.replace(" - ", "");
        text = text.replace("-", "");
        text = text.replace("...", " ");
        text = text.replace(".", " ");
        text = text.replace(")", "");
        text = text.replace("   ", " ");
        text = text.replace("  ", " ");
        // Unigrams: every CJK character becomes its own token.
        for (int i = 0; i < text.length(); i++) {
            String gram = text.substring(i, i + 1).trim();
            if (!gram.isEmpty()) {
                char c0 = gram.charAt(0);
                if (PinyinUtil.isChinese(c0) || isRi(c0) || isHan(c0)) {
                    tokens.add(new Token(gram, Token.Type.WORD, Token.SegType.OTHER_SEG_TYPE, i, i + 1));
                }
            }
        }
        // Bigrams over Chinese-led pairs.
        for (int i = 0; i + 1 < text.length(); i++) {
            String gram = text.substring(i, i + 2).trim();
            if (gram.length() > 1 && PinyinUtil.isChinese(gram.charAt(0))) {
                tokens.add(new Token(gram.trim(), Token.Type.WORD, Token.SegType.OTHER_SEG_TYPE, i, i + 2));
            }
        }
        // Trigrams over Chinese-led triples.
        for (int i = 0; i + 2 < text.length(); i++) {
            String gram = text.substring(i, i + 3).trim();
            if (gram.length() > 2 && PinyinUtil.isChinese(gram.charAt(0))) {
                tokens.add(new Token(gram, Token.Type.WORD, Token.SegType.OTHER_SEG_TYPE, i, i + 3));
            }
        }
    }

    /**
     * Tokenizes mixed-script text that does NOT start with a Chinese
     * character: recurses into a Chinese "/..." or "(...)" segment via
     * {@link #oneWordTokens}, then emits whitespace-delimited terms and
     * 2-/3-term shingles with positions in the cleaned text.
     */
    private void moreWordTokens(String text, Set<Token> tokens) {
        // If the title has a "/ 中文" or "(中文" alternate form, index it too.
        String[] split = new String[]{};
        if (text.contains("/")) {
            split = text.split("/");
        } else if (text.contains("(")) {
            split = text.split("\\(");
        }
        if (split.length > 1) {
            String alt = split[1].trim();
            if (!alt.isEmpty() && PinyinUtil.isChinese(alt.charAt(0))) {
                oneWordTokens(alt, tokens);
            }
        }
        // Normalize separators to single spaces (all literal patterns).
        text = text.replace(" (", " ");
        text = text.replace(" - ", " ");
        text = text.replace(" / ", " ");
        text = text.replace("(", " ");
        text = text.replace("-", " ");
        text = text.replace("...", " ");
        text = text.replace(".", " ");
        text = text.replace("/", " ");
        text = text.replace(")", "");
        text = text.replace("!", "");
        text = text.replace("'", "");
        text = text.replace("   ", " ");
        text = text.replace("  ", " ");
        // Single terms.
        if (text.contains(" ")) {
            int searchFrom = 0;
            for (String term : text.split(" ")) {
                // FIX: search from the previous match so a repeated term gets
                // its own position (plain indexOf always found the first one).
                int startIndex = text.indexOf(term, searchFrom);
                int endIndex = startIndex + term.length();
                searchFrom = endIndex;
                String trimmed = text.substring(startIndex, endIndex).trim();
                if (!trimmed.isEmpty()) {
                    tokens.add(new Token(trimmed, Token.Type.WORD, Token.SegType.OTHER_SEG_TYPE, startIndex, endIndex));
                }
            }
        }

        // Space boundaries (index 0 acts as the leading boundary).
        List<Integer> boundaries = new ArrayList<>();
        boundaries.add(0);
        for (int i = 0; i < text.length(); i++) {
            if (text.charAt(i) == ' ') {
                boundaries.add(i);
            }
        }
        // Two-term shingles.
        if (boundaries.size() >= 2) {
            for (int i = 0; i < boundaries.size() - 1; i++) {
                int startIndex = i == 0 ? boundaries.get(0) : boundaries.get(i) + 1;
                int endIndex = i + 2 >= boundaries.size() ? text.length() : boundaries.get(i + 2);
                String trimmed = text.substring(startIndex, endIndex).trim();
                if (!trimmed.isEmpty()) {
                    tokens.add(new Token(trimmed, Token.Type.WORD, Token.SegType.OTHER_SEG_TYPE, startIndex, endIndex));
                }
            }
        }
        // Three-term shingles.
        if (boundaries.size() >= 3) {
            for (int i = 0; i < boundaries.size() - 2; i++) {
                int startIndex = i == 0 ? boundaries.get(0) : boundaries.get(i) + 1;
                int endIndex = i + 3 >= boundaries.size() ? text.length() : boundaries.get(i + 3);
                String trimmed = text.substring(startIndex, endIndex).trim();
                if (!trimmed.isEmpty()) {
                    tokens.add(new Token(trimmed, Token.Type.WORD, Token.SegType.OTHER_SEG_TYPE, startIndex, endIndex));
                }
            }
        }
    }

    /**
     * Calls the CocCoc lib's segmentPointer function.
     *
     * @return pointer to the native result struct (caller must pass it to
     *     {@link #freeMemory(long)}), or a negative value on failure
     */
    public native long segmentPointer(String text, boolean forTransforming, int tokenizeOption, boolean keepPunctuation);

    /** Calls the CocCoc lib's freeMemory function to release a result struct. */
    private native void freeMemory(long resPointer);

    /**
     * Calls the CocCoc lib's initialize function.
     *
     * @return a negative status code on failure
     */
    private native int initialize(String dictPath);
}
