package com.rookie.tool.module.util.lexicon;

import org.springframework.util.StringUtils;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

import java.io.*;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.stream.Collectors;
import java.util.zip.InflaterOutputStream;

/**
 * Utilities for parsing third-party input-method lexicon files (Baidu .bdict,
 * QQ .qpyd) into plain word lists, and for segmenting Chinese text with the
 * IK analyzer.
 *
 * @author rookie
 */
public class AnalyserUtils {

    /** Utility class; not meant to be instantiated. */
    private AnalyserUtils() {
    }

    /**
     * Parses a Baidu lexicon file and extracts every word it contains.
     *
     * <p>Entry layout (observed format, starting at offset 0x350): a short word
     * length in characters, an unknown/skipped short, {@code 2 * length} pinyin
     * bytes, then {@code 2 * length} UTF-16LE word bytes.
     *
     * @param filePath path of the lexicon file to read
     * @return the distinct words found in the file (best effort: a truncated
     *         trailing entry ends parsing and returns what was read so far)
     * @throws Exception if the file cannot be opened or read
     */
    public static List<String> baiDuAnalyser(String filePath) throws Exception {
        ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
        // try-with-resources: the original leaked the file handle when
        // transferTo threw before the explicit close().
        try (RandomAccessFile file = new RandomAccessFile(filePath, "r");
             FileChannel fChannel = file.getChannel()) {
            fChannel.transferTo(0, fChannel.size(), Channels.newChannel(dataOut));
        }
        ByteBuffer dataRawBytes = ByteBuffer.wrap(dataOut.toByteArray());
        dataRawBytes.order(ByteOrder.LITTLE_ENDIAN);

        byte[] buf = new byte[1024];
        byte[] pinyin = new byte[1024];
        // Dictionary entries start at fixed offset 0x350.
        dataRawBytes.position(0x350);

        List<String> wordList = new ArrayList<>();
        while (dataRawBytes.position() < dataRawBytes.capacity()) {
            try {
                // Both shorts are inside the try: with 1-3 trailing bytes the
                // original threw BufferUnderflowException out of the method
                // instead of returning the words parsed so far.
                int length = dataRawBytes.getShort();
                dataRawBytes.getShort();
                // Skip the pinyin bytes.
                dataRawBytes.get(pinyin, 0, 2 * length);
                // Read the actual word.
                dataRawBytes.get(buf, 0, 2 * length);
                String word = new String(buf, 0, 2 * length, StandardCharsets.UTF_16LE);
                wordList.add(word.trim());
            } catch (Exception e) {
                // Trailing bytes did not form a complete entry; return what we have.
                return wordList.stream().distinct().collect(Collectors.toList());
            }
        }
        return wordList.stream().distinct().collect(Collectors.toList());
    }


    /**
     * Parses a QQ lexicon file and extracts every word it contains.
     *
     * <p>The file holds a zlib-compressed dictionary whose start offset is the
     * int at 0x38. After inflating, a table of 10-byte index entries
     * (pinyinLen at +0x0, wordLen at +0x1, pinyin start address at +0x6) runs
     * from offset 0 up to the first entry's pinyin address; the word bytes
     * (UTF-16LE) follow each entry's pinyin. The inflated dictionary is also
     * written next to the input as {@code <inputPath>.unzipped} for debugging.
     *
     * @param inputPath path of the lexicon file to read
     * @return the distinct words found in the file
     * @throws Exception if the file cannot be opened, read, or inflated
     */
    public static List<String> qqAnalyser(String inputPath) throws Exception {

        List<String> wordList = new ArrayList<>();

        ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
        // try-with-resources: the original leaked the handle on exception.
        try (RandomAccessFile file = new RandomAccessFile(inputPath, "r");
             FileChannel fChannel = file.getChannel()) {
            fChannel.transferTo(0, fChannel.size(), Channels.newChannel(dataOut));
        }

        ByteBuffer dataRawBytes = ByteBuffer.wrap(dataOut.toByteArray());
        dataRawBytes.order(ByteOrder.LITTLE_ENDIAN);

        // The int at 0x38 is where the compressed dictionary body begins.
        int startZippedDictAddress = dataRawBytes.getInt(0x38);
        int zippedDictLength = dataRawBytes.limit() - startZippedDictAddress;

        dataOut.reset();
        // Close the InflaterOutputStream so the inflater finishes and flushes
        // all remaining output — the original never closed it, which can
        // silently truncate the inflated dictionary.
        try (OutputStream inflater = new InflaterOutputStream(dataOut)) {
            inflater.write(dataRawBytes.array(), startZippedDictAddress, zippedDictLength);
        }

        ByteBuffer dataUnzippedBytes = ByteBuffer.wrap(dataOut.toByteArray());
        dataUnzippedBytes.order(ByteOrder.LITTLE_ENDIAN);

        // Debug dump of the inflated dictionary; close to avoid leaking the
        // file handle (the original FileOutputStream was never closed).
        try (FileOutputStream unzippedOut = new FileOutputStream(inputPath + ".unzipped")) {
            unzippedOut.getChannel().write(dataUnzippedBytes);
        }
        // The debug write consumed the buffer's position; the loop below uses
        // only absolute accessors, but rewind anyway for clarity.
        dataUnzippedBytes.rewind();

        int unzippedDictStartAddress = -1;
        int idx = 0;

        byte[] byteArray = dataUnzippedBytes.array();
        while (unzippedDictStartAddress == -1 || idx < unzippedDictStartAddress) {
            int pinyinStartAddress = dataUnzippedBytes.getInt(idx + 0x6);
            int pinyinLength = dataUnzippedBytes.get(idx) & 0xff;
            int wordStartAddress = pinyinStartAddress + pinyinLength;
            int wordLength = dataUnzippedBytes.get(idx + 0x1) & 0xff;
            if (unzippedDictStartAddress == -1) {
                // The first entry's pinyin address marks the end of the index table.
                unzippedDictStartAddress = pinyinStartAddress;
            }
            String word = new String(Arrays.copyOfRange(byteArray, wordStartAddress, wordStartAddress + wordLength),
                    StandardCharsets.UTF_16LE);
            wordList.add(word.trim());

            // Each index entry is 10 bytes wide.
            idx += 0xa;
        }
        return wordList.stream().distinct().collect(Collectors.toList());
    }


    /**
     * Segments the given text with the IK analyzer.
     *
     * @param target string to segment; {@code null} or empty yields an empty list
     * @return the distinct lexemes produced by the segmenter
     * @throws Exception if segmentation fails
     */
    public static List<String> analyzer(String target) throws Exception {
        if (StringUtils.isEmpty(target)) {
            return new LinkedList<>();
        }
        List<String> result = new ArrayList<>();
        StringReader sr = new StringReader(target);
        // Smart mode off (useSmart = false): finest-grained segmentation,
        // which noticeably improves recall for this use case.
        IKSegmenter ik = new IKSegmenter(sr, false);
        Lexeme lex;
        while ((lex = ik.next()) != null) {
            result.add(lex.getLexemeText());
        }

        return result.stream().distinct().collect(Collectors.toList());
    }


    public static void main(String[] args) throws Exception {
        List<String> text = analyzer("mybaits-plus的条件语句中如何加括号");
        System.out.println(text);
        // One weight slot per segmented word, initialized to zero (replaces a
        // loop whose iteration variable was never used).
        List<Integer> proportion = new ArrayList<>(Collections.nCopies(text.size(), 0));
        // For each word, count in how many of the segmented words it appears
        // as a substring, and record where.
        for (int i = 0; i < text.size(); i++) {
            calculateProportion(text, proportion, text.get(i), i);
        }

        System.out.println(proportion);

    }


    /**
     * Accumulates, into {@code proportion}, how often {@code word} occurs as a
     * substring of the segmented words.
     *
     * <p>Every word containing {@code word} has its slot incremented by one,
     * and the total occurrence count is then added to slot {@code times}.
     *
     * <p>NOTE(review): slot {@code times} is effectively counted twice — the
     * loop increments it (a word always contains itself) and {@code showTimes}
     * is added on top. Looks intentional as a self-weight boost, but confirm
     * with the author before relying on the exact numbers.
     *
     * @param participleWords the segmented words to scan
     * @param proportion      running per-word weights; mutated in place, must
     *                        be the same size as {@code participleWords}
     * @param word            the word to search for
     * @param times           index of {@code word} within {@code participleWords}
     */
    public static void calculateProportion(List<String> participleWords, List<Integer> proportion, String word, int times) {
        int showTimes = 0;
        for (int i = 0; i < participleWords.size(); i++) {
            if (participleWords.get(i).contains(word)) {
                proportion.set(i, proportion.get(i) + 1);
                showTimes++;
            }
        }
        proportion.set(times, proportion.get(times) + showTimes);
    }


}
