package com.taro.apise.indexer.model;

import lombok.Data;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.ansj.domain.Result;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;

import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.*;
import java.util.stream.Collectors;

@Slf4j
@Data
public class Document {

    // docId is assigned only after the document is inserted into the forward index
    private Integer docId;
    // Parsed from the file name (".html" suffix stripped)
    private String title;
    // Built from two extra pieces of information:
    //   1. the URL prefix, e.g. https://docs.oracle.com/javase/8/docs/api/
    //   2. the file's path relative to the documentation root directory
    private String url;
    // Read from the file, with scripts/HTML tags stripped and whitespace collapsed
    private String content;

    // Per-word occurrence counts for title and content, filled in by
    // segWordAndCalcWeight() and combined by calcWeight().
    private final Map<String, Integer> titleWordCountMap = new HashMap<>();
    private final Map<String, Integer> contentWordCountMap = new HashMap<>();

    // Natures (part-of-speech tags) whose terms are dropped during segmentation.
    // "w" is ansj's tag for punctuation.
    private static final Set<String> ignoredNatureStrSet =
            new HashSet<>(Collections.singletonList("w"));

    /**
     * Constructor intended for unit tests: all fields are supplied directly,
     * no file parsing happens.
     */
    public Document(String title, String url, String content) {
        this.title = title;
        this.url = url;
        this.content = content;
    }

    /**
     * Builds a document from an HTML file on disk.
     *
     * @param file      the HTML file to parse
     * @param urlPrefix prefix of the online documentation; expected to end with "/"
     * @param rootFile  root directory of the local documentation tree
     */
    public Document(File file, String urlPrefix, File rootFile) {
        this.title = parseTitle(file);
        this.url = parseUrl(file, urlPrefix, rootFile);
        this.content = parseContent(file);
    }

    /**
     * Derives the title from the file name by stripping the ".html" suffix.
     *
     * FIX: the original unconditionally chopped the last 5 characters, which
     * corrupted the names of files that do not end in ".html"; the suffix is
     * now removed only when actually present.
     */
    private String parseTitle(File file) {
        String name = file.getName();
        String suffix = ".html";
        return name.endsWith(suffix)
                ? name.substring(0, name.length() - suffix.length())
                : name;
    }

    /**
     * Builds the online URL: urlPrefix + path of {@code file} relative to
     * {@code rootFile}, e.g. "java/sql/DataSource.html".
     *
     * FIX: the original comments documented two intended steps — making the
     * relative part not start with a separator, and converting Windows
     * backslashes to forward slashes — but the code performed neither,
     * yielding URLs like ".../api//javax\sql\DataSource.html". Both steps
     * are now applied.
     */
    @SneakyThrows
    private String parseUrl(File file, String urlPrefix, File rootFile) {
        String rootPath = rootFile.getCanonicalPath();
        // Ensure the root path ends with a separator so the relative part does
        // not start with one (avoids a double slash after urlPrefix).
        if (!rootPath.endsWith(File.separator)) {
            rootPath = rootPath + File.separator;
        }

        String filePath = file.getCanonicalPath();
        // e.g. "javax\sql\DataSource.html" on Windows
        String relativePath = filePath.substring(rootPath.length());
        // Normalize Windows separators so the URL always uses "/"
        relativePath = relativePath.replace("\\", "/");

        return urlPrefix + relativePath;
    }

    /**
     * Reads the file and strips scripts, HTML tags and redundant whitespace.
     * The docs are known to be encoded in ISO-8859-1 (stated by the docs
     * themselves, not something to infer).
     */
    @SneakyThrows
    private String parseContent(File file) {
        StringBuilder contentBuilder = new StringBuilder();

        try (InputStream is = new FileInputStream(file)) {
            try (Scanner scanner = new Scanner(is, "ISO-8859-1")) {
                while (scanner.hasNextLine()) {
                    // Newlines become spaces so the tag-stripping regexes below
                    // can match constructs that spanned line breaks.
                    contentBuilder.append(scanner.nextLine()).append(" ");
                }
            }
        }

        return contentBuilder.toString()
                .replaceAll("<script.*?>.*?</script>", " ")
                .replaceAll("<.*?>", " ")
                .replaceAll("\\s+", " ")
                .trim();
    }

    /**
     * Segments title and content into words and computes each word's weight.
     * Weight = 10 * occurrences in title + occurrences in content.
     *
     * @return map from word to its weight within this document
     */
    public Map<String, Integer> segWordAndCalcWeight() {
        segTitleWord();
        segContentWord();

        // Union of all words seen in either the title or the content
        Set<String> wordSet = new HashSet<>(titleWordCountMap.keySet());
        wordSet.addAll(contentWordCountMap.keySet());

        Map<String, Integer> wordToWeight = new HashMap<>();
        for (String word : wordSet) {
            wordToWeight.put(word, calcWeight(word));
        }
        return wordToWeight;
    }

    // Title occurrences are weighted 10x relative to content occurrences.
    private int calcWeight(String word) {
        int countInTitle = titleWordCountMap.getOrDefault(word, 0);
        int countInContent = contentWordCountMap.getOrDefault(word, 0);
        return countInTitle * 10 + countInContent;
    }

    // FIX: in the original, the bodies of segTitleWord/segContentWord were
    // swapped — segContentWord segmented the title and vice versa. Because
    // both are always called together the net counts were the same, but the
    // names were misleading; each method now operates on what its name says.
    private void segTitleWord() {
        segAndCount(title, titleWordCountMap);
    }

    private void segContentWord() {
        segAndCount(content, contentWordCountMap);
    }

    /**
     * Segments {@code s} with ansj, drops terms whose nature is ignored
     * (punctuation), and accumulates per-word counts into {@code map}.
     */
    private void segAndCount(String s, Map<String, Integer> map) {
        Result result = ToAnalysis.parse(s);
        for (Term term : result.getTerms()) {
            if (ignoredNatureStrSet.contains(term.getNatureStr())) {
                continue;
            }
            // merge() is the idiomatic form of getOrDefault + put for counting
            map.merge(term.getName(), 1, Integer::sum);
        }
    }
}
