package com.bruin_du.searcher.indexer.model;

import lombok.Data;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.ToAnalysis;

import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.*;
import java.util.stream.Collectors;

@Data
@Slf4j
public class Document {
    /** Weight multiplier for a term occurring in the title vs. in the body. */
    private static final int TITLE_WEIGHT = 100;

    private Integer docId;
    private String title;
    private String url;
    private String content;

    /**
     * Builds a searchable document from a crawled HTML file on disk.
     *
     * @param file      the HTML file to index
     * @param urlPrefix base URL the relative on-disk path is appended to
     * @param rootFile  root directory the crawl was saved under
     */
    public Document(File file, String urlPrefix, File rootFile) {
        this.title = parseTitle(file);
        this.url = parseUrl(file, urlPrefix, rootFile);
        this.content = parseContent(file);
    }

    /**
     * Derives the title from the file name by stripping a trailing ".html".
     * If the name has no ".html" suffix it is returned unchanged, instead of
     * blindly truncating the last five characters (the previous behavior).
     */
    private String parseTitle(File file) {
        String name = file.getName();
        String suffix = ".html";
        if (name.endsWith(suffix)) {
            return name.substring(0, name.length() - suffix.length());
        }
        return name;
    }

    /**
     * Maps the file's on-disk location to a URL under {@code urlPrefix}.
     *
     * BUG FIX: the original did {@code rootPath.replaceAll("/", "\\")}; a lone
     * backslash is an invalid regex replacement string, so the call throws as
     * soon as the canonical path contains '/', i.e. on any non-Windows system.
     * Use the platform separator so this works everywhere.
     */
    @SneakyThrows
    private String parseUrl(File file, String urlPrefix, File rootFile) {
        String rootPath = rootFile.getCanonicalPath();
        if (!rootPath.endsWith(File.separator)) {
            rootPath += File.separator;
        }

        String filePath = file.getCanonicalPath();
        // Relative part, converted to URL-style forward slashes.
        String relativePath = filePath.substring(rootPath.length())
                .replace(File.separatorChar, '/');

        return urlPrefix + relativePath;
    }

    /**
     * Reads the HTML file, strips script blocks and tags, and collapses all
     * whitespace runs into single spaces.
     */
    @SneakyThrows
    private String parseContent(File file) {
        StringBuilder contentBuilder = new StringBuilder();

        // NOTE(review): ISO-8859-1 looks wrong for Chinese HTML pages —
        // confirm the crawler's output encoding (likely UTF-8).
        try (InputStream is = new FileInputStream(file)) {
            try (Scanner scanner = new Scanner(is, "ISO-8859-1")) {
                while (scanner.hasNextLine()) {
                    contentBuilder.append(scanner.nextLine()).append(" ");
                }
            }
        }

        return contentBuilder.toString()
                // (?is) = case-insensitive + DOTALL, so <SCRIPT> blocks and
                // script bodies containing '<' are removed too; the original
                // [^<]* pattern leaked such script bodies into the content.
                .replaceAll("(?is)<script[^>]*>.*?</script>", " ")
                .replaceAll("<[^>]*>", " ")
                .replaceAll("\\s+", " ")
                .trim();
    }

    /**
     * Segments the title and content into terms and computes a weight per
     * distinct term: {@code weight = titleCount * 100 + contentCount}.
     *
     * @return map from term to its weight in this document
     */
    public Map<String, Integer> segWordAndCalcWeight() {
        // BUG FIX: the original filter was vo.item().equals(" "), which
        // compares an AnsjItem to a String and is always false — wordInTitle
        // was always empty, so title occurrences never contributed weight.
        // Keep non-blank term names instead.
        List<String> wordInTitle = ToAnalysis.parse(title)
                .getTerms()
                .stream()
                .map(Term::getName)
                .filter(name -> !name.trim().isEmpty())
                .collect(Collectors.toList());

        // Occurrence count of each term in the title.
        Map<String, Integer> titleWordCount = new HashMap<>();
        for (String word : wordInTitle) {
            titleWordCount.merge(word, 1, Integer::sum);
        }

        // Occurrence count of each term in the content.
        List<String> wordInContent = ToAnalysis.parse(content)
                .getTerms()
                .stream()
                .map(Term::getName)
                .collect(Collectors.toList());
        Map<String, Integer> contentWordCount = new HashMap<>();
        for (String word : wordInContent) {
            contentWordCount.merge(word, 1, Integer::sum);
        }

        // Union of all distinct terms from title and content.
        Set<String> wordSet = new HashSet<>(wordInTitle);
        wordSet.addAll(wordInContent);

        Map<String, Integer> wordToWeight = new HashMap<>();
        for (String word : wordSet) {
            int titleCount = titleWordCount.getOrDefault(word, 0);
            int contentCount = contentWordCount.getOrDefault(word, 0);
            wordToWeight.put(word, titleCount * TITLE_WEIGHT + contentCount);
        }

        return wordToWeight;
    }
}
