package com.bonc.vectorspace.model;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.Set;

import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.NlpAnalysis;
//import org.apache.commons.lang.StringUtils;
import org.apache.commons.io.FileUtils;

/**
 * This class represents one document.
 * It will keep track of the term frequencies.
 * @author swapneel
 *
 */
public class Document implements Comparable<Document> {
	
	/**
	 * A hashmap for term frequencies.
	 * Maps a term to the number of times this term appears in this document.
	 */
	private HashMap<String, Integer> termFrequency;
	
	/** Optional external identifier for this document. */
	private String id;
	
	/** Directory the file lives in; prepended to {@link #filename} when reading. */
	private String path;
	
	/** Raw document text, either supplied directly or loaded from the file. */
	private String content;

	/**
	 * The name of the file to read.
	 */
	private String filename;
	
	/** Stopword table; only the key set is consulted for filtering. */
	private Map<String, String> stopwords;
	
	/** Historical hard-coded directory, used when no explicit path is given. */
	private static final String DEFAULT_PATH =
			"C:\\Users\\Administrator\\git\\hierarchical-clustering-java\\src\\main\\resources\\";
	
	/**
	 * Creates a document by reading a file from the default directory.
	 * @param filename the name of the file to read
	 * @param chineseSeg true to segment with the Ansj Chinese analyzer,
	 *                   false for whitespace tokenization
	 * @param stopwordsChi stopword table (keys are the stopwords)
	 */
	//3
	public Document(String filename, Boolean chineseSeg, Map<String, String> stopwordsChi) {
		this.filename = filename;
		termFrequency = new HashMap<String, Integer>();
		this.stopwords = stopwordsChi;
		if (chineseSeg) {
			readChineseFileAndPreProcess();
		} else {
			readFileAndPreProcess();
		}
	}
	
	/**
	 * Creates a document by reading a file from an explicit directory.
	 * @param id external identifier for this document
	 * @param path directory containing the file (should end with a separator)
	 * @param filename the name of the file to read
	 * @param chineseSeg true to segment with the Ansj Chinese analyzer
	 * @param stopwordsChi stopword table (keys are the stopwords)
	 */
	//5
	public Document(String id, String path, String filename, Boolean chineseSeg, Map<String, String> stopwordsChi) {
		this.setId(id);
		this.filename = filename;
		this.path = path;
		termFrequency = new HashMap<String, Integer>();
		this.stopwords = stopwordsChi;
		if (chineseSeg) {
			readChineseFileAndPreProcess();
		} else {
			readFileAndPreProcess();
		}
	}
	
	/**
	 * Creates a document from an in-memory content string.
	 * @param id external identifier; also used as the document's name
	 * @param content the raw document text
	 * @param chineseSeg true to segment with the Ansj Chinese analyzer
	 * @param stopwordsChi stopword table (keys are the stopwords)
	 */
	//4
	public Document(String id, String content, Boolean chineseSeg, Map<String, String> stopwordsChi) {
		this.setId(id);
		this.content = content.trim();
		this.filename = id;
		termFrequency = new HashMap<String, Integer>();
		this.stopwords = stopwordsChi;
		if (chineseSeg) {
			readChineseContentAndPreProcess();
		} else {
			// BUG FIX: this branch previously called readFileAndPreProcess(),
			// which tried to read a file named after the id from disk and
			// ignored the supplied content entirely. Tokenize the content.
			readContentAndPreProcess();
		}
	}

	/**
	 * Segments {@link #content} with the Ansj NLP analyzer and counts the
	 * surviving terms. A term is kept when it is longer than one character,
	 * is not a stopword, and matches the filter regex (letters, digits and
	 * CJK characters only; purely numeric tokens are rejected).
	 * No-op when content is null.
	 */
	private void readChineseContentAndPreProcess() {
		if (content == null) {
			return;
		}
		List<Term> terms = NlpAnalysis.parse(this.content.trim());
		// Negative lookahead rejects all-digit tokens; the body accepts
		// ASCII alphanumerics plus the CJK Unified Ideographs range.
		String regex = "(?!^[\\d]+$)^[a-zA-Z0-9\u4E00-\u9FA5]+$";

		for (Term term : terms) {
			// trim() never returns null, and length() > 1 implies non-empty,
			// so the old null/isEmpty checks were redundant.
			String trimWord = term.getName().trim();
			if (trimWord.length() > 1
					&& !this.stopwords.containsKey(trimWord)
					&& trimWord.matches(regex)) {
				incrementTerm(trimWord);
			}
		}
	}
	
	/**
	 * Reads {@code path + filename} into {@link #content} and delegates to
	 * {@link #readChineseContentAndPreProcess()}. I/O failures are logged
	 * and leave the term table empty (best-effort, as before).
	 */
	private void readChineseFileAndPreProcess() {
		String fname = path + filename;
		try {
			File file = new File(fname);
			// NOTE(review): no charset is specified, so the platform default
			// is used; the previously-tried gb2312 variant suggests encoding
			// has been an issue here — consider an explicit charset. TODO confirm.
			String content = FileUtils.readFileToString(file);
			this.content = content;
			readChineseContentAndPreProcess();
			
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
	
	
	/**
	 * The constructor.
	 * It takes in the name of a file to read.
	 * It will read the file and pre-process it.
	 * @param filename the name of the file
	 */
	public Document(String filename) {
		this.filename = filename;
		termFrequency = new HashMap<String, Integer>();
		
		readFileAndPreProcess();
	}
	
	/**
	 * This method will read in the file and do some pre-processing.
	 * The following things are done in pre-processing:
	 * Every word is converted to lower case.
	 * Every character that is not a letter or a digit is removed.
	 * We don't do any stemming.
	 * Once the pre-processing is done, the term-frequency table is updated.
	 * A missing file is logged and leaves the term table empty.
	 */
	private void readFileAndPreProcess() {
		// BUG FIX: honor an explicitly configured directory (set by the
		// 5-arg constructor) instead of always using the hard-coded default.
		String base = (path != null) ? path : DEFAULT_PATH;
		String fname = base + filename;
		// try-with-resources: the Scanner was previously never closed.
		try (Scanner in = new Scanner(new File(fname))) {
			System.out.println("Reading file: " + filename + " and preprocessing");
			
			while (in.hasNext()) {
				processEnglishWord(in.next());
			}
		} catch (FileNotFoundException e) {
			e.printStackTrace();
		}
	}
	
	/**
	 * Tokenizes the in-memory {@link #content} string with the same
	 * pre-processing as {@link #readFileAndPreProcess()}.
	 * No-op when content is null.
	 */
	private void readContentAndPreProcess() {
		if (content == null) {
			return;
		}
		try (Scanner in = new Scanner(content)) {
			while (in.hasNext()) {
				processEnglishWord(in.next());
			}
		}
	}
	
	/**
	 * Lower-cases the word, strips every non-alphanumeric character, and
	 * counts it if anything is left.
	 * @param nextWord one whitespace-delimited token
	 */
	private void processEnglishWord(String nextWord) {
		String filteredWord = nextWord.replaceAll("[^A-Za-z0-9]", "").toLowerCase();
		if (!filteredWord.isEmpty()) {
			incrementTerm(filteredWord);
		}
	}
	
	/**
	 * Adds one occurrence of {@code term} to the frequency table.
	 * @param term the already-filtered term
	 */
	private void incrementTerm(String term) {
		Integer oldCount = termFrequency.get(term);
		termFrequency.put(term, oldCount == null ? 1 : oldCount + 1);
	}
	
	/**
	 * This method will return the term frequency for a given word.
	 * If this document doesn't contain the word, it will return 0
	 * @param word The word to look for
	 * @return the term frequency for this word in this document
	 */
	public double getTermFrequency(String word) {
		Integer count = termFrequency.get(word);
		return count == null ? 0 : count;
	}
	
	/**
	 * This method will return a set of all the terms which occur in this document.
	 * @return a set of all terms in this document
	 */
	public Set<String> getTermList() {
		return termFrequency.keySet();
	}


	/**
	 * Orders documents by filename.
	 * NOTE(review): equals/hashCode are not overridden, so this ordering is
	 * not consistent with equals — avoid using Document in sorted sets/maps.
	 * @param other the document to compare against
	 * @return the result of comparing the two filenames lexicographically
	 */
	@Override
	public int compareTo(Document other) {
		return filename.compareTo(other.getFileName());
	}

	/**
	 * @return the filename
	 */
	public String getFileName() {
		return filename;
	}
	
	/**
	 * This method is used for pretty-printing a Document object.
	 * @return the filename
	 */
	@Override
	public String toString() {
		return filename;
	}

	/** @return the external identifier, or null if never set */
	public String getId() {
		return id;
	}
	
	/** @param id the external identifier to assign */
	public void setId(String id) {
		this.id = id;
	}
	
	
	/** @return the raw document text, or null if constructed without content */
	public String getContent() {
		return content;
	}
}
