/*
 * Copyright 2011 Michael Guo
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.uc.cs.guome.ir;

import java.util.HashMap;
import java.util.Locale;
import java.util.regex.Pattern;

/**
 * Parse the input into a Document objects
 * @author(name="Michael Guo",date="01/18/2011")
 * @version 0.1
 */
/**
 * Parses a raw input record into a {@link Document}.
 *
 * <p>Input format: {@code <number><tab><text>}. Each token of the text is
 * lower-cased, stripped of surrounding quotes, trailing punctuation and
 * unbalanced parentheses, filtered against the stop-word list, and counted
 * into the document's term-frequency map.
 *
 * @author(name="Michael Guo",date="01/18/2011")
 * @version 0.1
 */
public class Parser {

	/** Matches tokens consisting only of non-word characters (pure punctuation). */
	private static final Pattern NON_WORD = Pattern.compile("\\W*");

	/**
	 * Trailing punctuation to strip from a token. NOTE(review): the original
	 * pattern "[:|.|,|;|\"]+$" used '|' inside a character class, where it is
	 * a literal — so it also stripped trailing pipe characters. That was
	 * almost certainly unintended alternation syntax and is fixed here.
	 */
	private static final Pattern TRAILING_PUNCT = Pattern.compile("[:.,;\"]+$");

	/**
	 * Parses one input record into a {@code Document}.
	 *
	 * @param in           a record of the form {@code <number><tab><text>}
	 * @param stopWordList supplies the set of words to exclude from the term map
	 * @return a {@code Document} carrying the parsed id and term-frequency map
	 * @throws Exception if the record is malformed (no tab, or a non-numeric id)
	 */
	public static Document parse(String in, StopWordList stopWordList) throws Exception {

		// text format: <number><white_space><text>
		int tab = in.indexOf('\t');
		int id = Integer.parseInt(in.substring(0, tab));
		String text = in.substring(tab + 1);

		Document doc = new Document();
		doc.setDocID(id);

		HashMap<String, TermFrequency> terms = new HashMap<String, TermFrequency>();
		Tokenizer st = new Tokenizer(text);
		while (st.hasMoreTokens()) {
			String token = normalize(st.nextToken());
			if (token == null)
				continue; // pure punctuation, skip

			// stop word filtering + frequency counting
			if (!stopWordList.getStopWordSet().contains(token)) {
				TermFrequency tf = terms.get(token);
				if (tf == null) {
					terms.put(token, new TermFrequency());
				} else {
					// already in the map; bump the count in place (no re-put needed)
					tf.increase();
				}
			}
		}
		doc.setTerms(terms);

		return doc;
	}

	/**
	 * Linguistic processing for a single token: lower-cases it and strips
	 * leading quotes, trailing punctuation, and unbalanced parentheses.
	 *
	 * @param token the raw token from the tokenizer
	 * @return the cleaned token, or {@code null} if it was pure punctuation
	 */
	private static String normalize(String token) {
		if (NON_WORD.matcher(token).matches())
			return null;
		// Locale.ROOT keeps lower-casing locale-independent (e.g. Turkish 'I')
		token = token.toLowerCase(Locale.ROOT).trim();
		while (token.startsWith("\""))
			token = token.substring(1);
		token = TRAILING_PUNCT.matcher(token).replaceFirst("");
		// drop a parenthesis only when its partner is absent from the token
		if (token.startsWith("(") && !token.contains(")"))
			token = token.substring(1);
		if (token.endsWith(")") && !token.contains("("))
			token = token.substring(0, token.length() - 1);
		// (the original re-checked for a leading quote here, but the while
		// loop above already removed all of them — that check was dead code)
		return token;
	}
}
