package storage.v1;

import java.util.HashMap;
import java.util.HashSet;

import com.onpositive.nlp.sequence.ClassifiedSequence;
import com.onpositive.nlp.sequence.IDefinitionExtractor;
import com.onpositive.nlp.sequence.SequenceParser;
import com.onpositive.wiki3.db.IWikiDB;
import com.onpositive.wiki3.db.impl.WikiDBImpl;

public class DefinitionExtractor implements IDefinitionExtractor {

	/**
	 * Extracts the defining phrase for a wiki article and parses it into a
	 * {@link ClassifiedSequence}.
	 *
	 * <p>The article text is stripped of wiki markup, normalized to lowercase
	 * space-separated words, and scanned for the (roughly singularized) title.
	 * The words following the title — typically "&lt;title&gt; is a ..." —
	 * are handed to {@link SequenceParser#parse}.
	 *
	 * @param content         raw wiki markup of the article
	 * @param db              wiki database; must be a {@link WikiDBImpl}
	 *                        (unchecked downcast below, as in the original)
	 * @param title           article title, possibly with '_'/'-' separators
	 * @param possibleParents currently unused by this implementation
	 * @return the parsed definition sequence, or {@code null} when the title
	 *         never occurs in the normalized text or the match is too short
	 */
	public ClassifiedSequence doExtract(String content, IWikiDB db, String title,
			String[] possibleParents) {
		// Strip wiki markup that would confuse the sequence parser.
		content = ParserV2.killComments(content);
		content = ParserV2.killRefs(content);
		content = ParserV2.killTemplates(content);
		content = ParserV2.killPictures(content);
		content = ParserV2.killBreaks(content);
		content = ParserV2.killWWWLinksAndSingleSquareBrackets(content);
		content = ParserV2.killMultipleSpaces(content);
		// Crude singularization ("Apples" -> "Apple").
		// NOTE(review): also mangles words legitimately ending in 's'
		// (e.g. "Bus" -> "Bu") — confirm this is acceptable upstream.
		if (title.endsWith("s")) {
			title = title.substring(0, title.length() - 1);
		}
		title = title.replace('_', ' ');
		title = title.replace('-', ' ');

		// Normalize punctuation and link markup to plain words separated by
		// single spaces. (The original applied the apostrophe replacement
		// twice; once is sufficient.)
		String fs = content;
		fs = fs.replace('\'', ' ');
		fs = fs.replace(',', ' ');
		fs = fs.replace('[', ' ');
		fs = fs.replace('|', ' ');
		fs = fs.replace('-', ' ');
		fs = fs.replace("]]", "");
		fs = fs.replace("[[", "");

		// Articles carry no definitional information; drop them.
		fs = fs.replaceAll(" an ", " ");
		fs = fs.replaceAll(" a ", " ");
		fs = fs.replaceAll(" the ", " ");
		fs = ParserV2.killMultipleSpaces(fs).toLowerCase().trim();

		int titleStart = fs.indexOf(title.toLowerCase());
		if (titleStart == -1) {
			// The normalized text never mentions the title; nothing to extract.
			return null;
		}
		fs = fs.substring(titleStart);
		fs = truncateAtHeading(fs);
		if (fs.length() < title.length()) {
			return null;
		}
		// Keep only the words after the title itself ("<title> is a ...").
		fs = fs.substring(title.length()).trim();
		return SequenceParser.parse(fs.replace(' ', '_'), (WikiDBImpl) db);
	}

	/**
	 * Truncates {@code fs} at the first {@code "=="} section heading found
	 * outside parentheses, dropping parenthesized spans from the kept prefix.
	 *
	 * <p>Behavior-preserving quirks carried over from the original code:
	 * if no heading is found, the input is returned unchanged (parenthesized
	 * text included); when a heading is found, its first {@code '='} remains
	 * at the end of the result; the previous-character tracker is not updated
	 * on {@code ')'}.
	 *
	 * @param fs normalized article text starting at the title
	 * @return the text before the first top-level {@code "=="} heading, or
	 *         {@code fs} itself when no such heading exists
	 */
	private static String truncateAtHeading(String fs) {
		int depth = 0;
		int prev = 0;
		StringBuilder kept = new StringBuilder();
		for (int a = 0; a < fs.length(); a++) {
			char c = fs.charAt(a);
			if (c == '(') {
				depth++;
			}
			if (c == ')') {
				depth--;
				continue; // prev intentionally not updated, matching original
			}
			if (depth == 0) {
				if (c == '=' && prev == '=') {
					return kept.toString();
				}
				kept.append(c);
			}
			prev = c;
		}
		return fs;
	}

}
