package gbench.appdemo.crawler.junit.analyzer;

import static gbench.appdemo.crawler.analyzer.YuhuanAnalyzer.*;
import static gbench.appdemo.crawler.srch.JdbcSrchApplication.traverse;
import static gbench.common.matlib.MatlibCanvas.*;
import static gbench.common.tree.LittleTree.IRecord.*;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import java.util.stream.Stream;

import org.junit.jupiter.api.Test;

import gbench.appdemo.crawler.analyzer.YuhuanAnalyzer;
import gbench.appdemo.crawler.analyzer.lexer.ILexProcessor;
import gbench.appdemo.crawler.analyzer.lexer.Lexeme;
import gbench.appdemo.crawler.analyzer.lexer.Trie;
import gbench.common.fs.FileSystem;
import gbench.common.matlib.rdd.*;
import gbench.common.tree.LittleTree.*;

@SuppressWarnings("unused")
public class JunitAnalyzer2 {

    /**
     * Builds a {@link YuhuanAnalyzer} whose lexicon is a character Trie loaded from a corpus.
     * <p>
     * Every {@code *.txt} file in the corpus directory is read as UTF-8; each non-blank line
     * has the form {@code keyword} or {@code keyword|labels}. The keyword is split into
     * single characters and inserted into the Trie, and the terminal node is tagged with
     * {@code category=word}, {@code meaning=keyword} and, when present, {@code labels}.
     *
     * @param corpusDir corpus directory path; when it is not a directory it is resolved as a
     *                  single file via {@link FileSystem#path} relative to this class
     * @return an analyzer with one lexical processor installed and the corpus Trie attached
     *         under the {@code "corpus"} attribute
     */
    public YuhuanAnalyzer getYuhuanAnalyzer(final String corpusDir) {
        final var corpus = new Trie<String>(); // root node of the corpus trie
        final var corpusHome = new File(corpusDir);

        // Build the corpus: each *.txt file contributes one keyword entry per line.
        ((corpusHome.isDirectory())
            ? Stream.of(corpusHome.listFiles())
            : Stream.of(new File(FileSystem.path(corpusDir, this.getClass())))
        ).filter(file -> file.getAbsolutePath().endsWith(".txt")).forEach(file -> {
            FileSystem.utf8lines(file)
            .filter(line -> !line.matches("^\\s*$")) // drop blank lines
            .forEach(line -> { // extract corpus vocabulary
                Stream.of(line.strip().split("[\n]+")) // split on embedded newlines
                .forEach(e -> { // set the lexeme attributes on each trie node
                    final var parts = e.split("[|]+"); // "keyword|labels" — split on '|'
                    final var keyword = parts[0].strip();
                    final var points = keyword.split(""); // keyword as single-character points
                    final var trie = Trie.addPoints2Trie(corpus, points)
                        .addAttribute("category", "word") // lexical category
                        .addAttribute("meaning", keyword);
                    if (parts.length > 1) { // optional user-defined labels
                        trie.addAttribute("labels", parts[1]);
                    }
                });
            }); // forEach line
        });

        @SuppressWarnings("resource")
        final var yuhuan = new YuhuanAnalyzer().addProcessor(new ILexProcessor() { // the tokenizer

            // Keyword patterns, compiled once instead of re-compiling via String#matches
            // on every call (isKeyword is on the hot path of evaluate/isPrefix).
            private final List<Pattern> keywordPatterns = List.of(
                Pattern.compile("[a-zA-Z]+"), // English word
                Pattern.compile("[a-zA-Z_-]+"), // English identifier
                Pattern.compile("[\\d\\.]+"), // Arabic numerals
                Pattern.compile("[一二三四五六七八九十零百千万亿兆]+"), // Chinese numerals
                // Date prefix: year, optional month, optional day. The '+' quantifiers for the
                // month/day digits must sit OUTSIDE the character class — inside the class it is
                // a literal '+', which used to reject multi-digit months/days like "12月".
                Pattern.compile(
                    "[一二三四五六七八九十零百千万亿兆\\d]+年([一二三四五六七八九十零百千万亿兆\\d]+(月([一二三四五六七八九十零百千万亿兆\\d]+日?)?)?)?"));

            /**
             * Tests whether a symbol is a built-in keyword (word, identifier, number or date).
             *
             * @param line the candidate symbol
             * @return true when the whole symbol matches one of the keyword patterns
             */
            public boolean isKeyword(final String line) {
                return keywordPatterns.stream().anyMatch(p -> p.matcher(line).matches());
            }

            /**
             * Converts a symbol into a lexeme.
             *
             * @param symbol the symbol text
             * @return the lexeme, or null when the symbol is not a complete corpus word
             */
            @Override
            public Lexeme evaluate(final String symbol) {
                // Keyword branch never needs the trie — check it first to skip the lookup.
                if (this.isKeyword(symbol)) return new Lexeme(symbol, "word", symbol);
                final var trieNode = corpus.getTrie(symbol.split("")); // corpus node lookup
                if (trieNode == null) return null;
                final var category = trieNode.strAttr("category"); // lexical category
                if (category == null) return null; // a bare prefix is not a word
                final var meaning = trieNode.strAttr("meaning"); // meaning attribute
                return new Lexeme(symbol, category, meaning).addAttributes(trieNode.getAttributes());
            }

            /**
             * @param line the candidate prefix
             * @return true when the text is a keyword or a prefix of some corpus word
             */
            @Override
            public boolean isPrefix(final String line) {
                return this.isKeyword(line) || corpus.isPrefix(line.split(""));
            } // isPrefix
        }); // addProcessor

        yuhuan.setAttribute("corpus", corpus); // expose the corpus trie to callers
        return yuhuan;
    }

    /**
     * Smoke test: tokenize a single line and print the lexemes.
     */
    @Test
    public void test() {
        final var yuhuan = this.getYuhuanAnalyzer("C:\\Users\\gbench\\Desktop\\中国笔记\\逻辑词汇");
        final var line = "金融资产转入方";
        yuhuan.analyzeS(line).forEach(e -> {
            println(e);
        });
    }

    /**
     * Corpus Trie demo: tokenize every file under the accounting-standards folder
     * and print each lexeme record.
     */
    @Test
    public void foo() {
        final var yuhuan = this.getYuhuanAnalyzer("C:\\Users\\gbench\\Desktop\\中国笔记\\逻辑词汇");
        traverse(new File("C:\\Users\\gbench\\Desktop\\tmp\\file\\会计准则"), file -> {
            println(file);
            try (var br = new BufferedReader(new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) {
                br.lines().filter(e -> !e.strip().matches("\\s*")).peek(System.out::println)
                    .flatMap(line -> yuhuan.analyzeS(line, e -> token2rec(e).add(REC(e.getAttributes()))))
                    .forEach(e -> {
                        println(e);
                    });
            } catch (Exception e) {
                e.printStackTrace();
            }
        });
    }

    /**
     * Corpus Trie demo: word-frequency statistics over the poetry folder —
     * tokenize, keep "word" lexemes, group by symbol and print counts.
     */
    @Test
    public void quz() {
        final var yuhuan = this.getYuhuanAnalyzer("C:\\Users\\gbench\\Desktop\\中国笔记\\逻辑词汇");
        final var ar = new AtomicInteger(0); // running index for the printed output
        traverse(new File("C:\\Users\\gbench\\Desktop\\tmp\\file\\古诗"), file -> {
            println(file);
            try (var br = new BufferedReader(new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) {
                br.lines().filter(e -> !e.strip().matches("\\s*"))
                    .flatMap(line -> yuhuan.analyzeS(line, e -> token2rec(e).add(REC(e.getAttributes()))))
                    .filter(e -> "word".equals(e.str("category")))
                    .sorted(IRecord.cmp("symbol", (String key, String t) -> t.length(), false))
                    .map(e -> e.str("symbol")).collect(PVec.pvec3clc(t -> BasePair.bp(t, 1)))
                    .groupByKey()
                    .map(e -> IRecord.REC("name", e._1(), "size", e._2().size()))
                    .sorted(IRecord.cmp("size", (String key, Integer t) -> t, false))
                    .forEach(word -> {
                        System.out.println(ar.getAndIncrement() + ")" + word);
                    });
            } catch (Exception e) {
                e.printStackTrace();
            }
        });
    }

    /**
     * Date tokenization check: multi-digit month/day dates should be recognized.
     */
    @Test
    public void bar() {
        final var yuhuan = this.getYuhuanAnalyzer("C:\\Users\\gbench\\Desktop\\中国笔记\\逻辑词汇");
        yuhuan.analyze("12月16日至18日").forEach(e -> {
            println(e);
        });
    }

}
