package com.classify.textClassify.doc;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;

import com.classify.word.WordSegmenter;
import com.google.common.base.Charsets;
import com.google.common.base.Splitter;
import com.google.common.io.Files;

public class Fenci {
    /**
     * Matches any single separator character: ASCII punctuation ({@code \p{Punct}}),
     * Unicode punctuation ({@code \pP}, covers CJK punctuation), the full-width
     * ideographic space (U+3000), blanks, whitespace and control characters.
     * Because each of the original six replaceAll passes replaced one matched
     * character with one space, a single combined character class is equivalent —
     * and the pattern is compiled once instead of six times per call.
     */
    private static final Pattern SEPARATORS =
            Pattern.compile("[\\p{Punct}\\pP　\\p{Blank}\\p{Space}\\p{Cntrl}]");

    /** Raw document text; normalized in place by {@link #docToWords(WordSegmenter)}. */
    public String content;

    /** Word -> occurrence count, populated via {@link #incrWord(String)}. */
    Map<String, Integer> wordsFreq = new HashMap<String, Integer>();

    /**
     * @param content raw document text to be segmented and counted
     */
    public Fenci(String content) {
        this.content = content;
    }

    /**
     * Returns whether {@code word} has been counted at least once.
     */
    public boolean containsWord(String word) {
        return wordsFreq.containsKey(word);
    }

    /**
     * Returns the recorded frequency of {@code word}, or 0 if it was never counted.
     */
    public int getWordFreq(String word) {
        // Single lookup instead of containsKey + get.
        Integer freq = wordsFreq.get(word);
        return freq == null ? 0 : freq;
    }

    /**
     * Increments the count for {@code word}, starting at 1 for a new word.
     */
    public void incrWord(String word) {
        // Single lookup instead of containsKey + get.
        Integer prev = wordsFreq.get(word);
        wordsFreq.put(word, prev == null ? 1 : prev + 1);
    }

    /**
     * Normalizes {@link #content} by replacing every punctuation/whitespace/control
     * character with a space (mutating the field, as before), then segments each
     * space-delimited chunk with {@code ws} and accumulates word frequencies.
     *
     * @param ws segmenter used to split each chunk into words
     * @return the accumulated frequency map (the same instance as {@link #wordsFreq})
     */
    public Map<String, Integer> docToWords(WordSegmenter ws) {
        content = SEPARATORS.matcher(content).replaceAll(" ");

        for (String chunk : Splitter.on(" ").omitEmptyStrings()
                .splitToList(content)) {
            for (String word : ws.segment(chunk)) {
                incrWord(word);
            }
        }
        return wordsFreq;
    }

}
