package org.apache.lucene.analysis.compound;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


import java.util.Set;
import java.util.HashMap;

import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;

import org.apache.lucene.analysis.compound.morphology.sandhi;
import org.apache.lucene.analysis.compound.morphology.savarNa;
import org.apache.lucene.analysis.compound.morphology.guNa;
import org.apache.lucene.analysis.compound.morphology.ayAdi;
import org.apache.lucene.analysis.compound.morphology.poorvaroopa;
import org.apache.lucene.analysis.compound.morphology.vridhi;
import org.apache.lucene.analysis.compound.morphology.yaN;
import org.apache.lucene.analysis.compound.morphology.dePluralizer;

/**
 * A {@link TokenFilter} that decomposes compound words formed by sandhi
 * (euphonic combination), as found in Sanskrit and related Indic languages.
 * <p>
 * Each incoming term is tested against a sequence of sandhi rules
 * (savarNa, guNa, yaN, vridhi, poorvaroopa, ayAdi) and finally a
 * de-pluralizer; the first rule that yields a dictionary-validated split
 * replaces the term with its two component subwords.
 * <p>
 * You must specify the required {@link Version} compatibility when creating
 * CompoundWordTokenFilterBase:
 * <ul>
 * <li>As of 3.1, CompoundWordTokenFilterBase correctly handles Unicode 4.0
 * supplementary characters in strings and char arrays provided as compound word
 * dictionaries.
 * </ul>
 */
public class SandhiCompoundWordTokenFilter extends CompoundWordTokenFilterBase {

  /**
   * Creates a new {@link SandhiCompoundWordTokenFilter} with default
   * word-size limits.
   *
   * @param matchVersion
   *          Lucene version to enable correct Unicode 4.0 behavior in the
   *          dictionaries if Version > 3.0. See <a
   *          href="CompoundWordTokenFilterBase.html#version"
   *          >CompoundWordTokenFilterBase</a> for details.
   * @param input
   *          the {@link TokenStream} to process
   * @param dictionary
   *          the word dictionary to match against; must not be {@code null}
   * @throws IllegalArgumentException if {@code dictionary} is {@code null}
   */
  public SandhiCompoundWordTokenFilter(Version matchVersion, TokenStream input, CharArraySet dictionary) {
    // Validate inside the super-call so the check runs BEFORE the superclass
    // constructor can touch a null dictionary.
    super(matchVersion, input, checkDictionary(dictionary));
  }

  /**
   * Creates a new {@link SandhiCompoundWordTokenFilter}.
   *
   * @param matchVersion
   *          Lucene version to enable correct Unicode 4.0 behavior in the
   *          dictionaries if Version > 3.0. See <a
   *          href="CompoundWordTokenFilterBase.html#version"
   *          >CompoundWordTokenFilterBase</a> for details.
   * @param input
   *          the {@link TokenStream} to process
   * @param dictionary
   *          the word dictionary to match against; must not be {@code null}
   * @param minWordSize
   *          only words longer than this get processed
   * @param minSubwordSize
   *          only subwords longer than this get to the output stream
   * @param maxSubwordSize
   *          only subwords shorter than this get to the output stream
   * @param onlyLongestMatch
   *          Add only the longest matching subword to the stream
   * @throws IllegalArgumentException if {@code dictionary} is {@code null}
   */
  public SandhiCompoundWordTokenFilter(Version matchVersion, TokenStream input, CharArraySet dictionary,
      int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
    super(matchVersion, input, checkDictionary(dictionary), minWordSize, minSubwordSize, maxSubwordSize,
        onlyLongestMatch);
  }

  /**
   * Rejects a null dictionary before it reaches the superclass constructor,
   * preserving the original {@link IllegalArgumentException} contract.
   */
  private static CharArraySet checkDictionary(CharArraySet dictionary) {
    if (dictionary == null) {
      throw new IllegalArgumentException("dictionary cannot be null");
    }
    return dictionary;
  }

  /**
   * Attempts to split the current term by applying each sandhi rule in
   * priority order (savarNa, guNa, yaN, vridhi, poorvaroopa, ayAdi), falling
   * back to de-pluralization. The first rule returning a non-null result wins;
   * its space-separated components are emitted as {@link CompoundToken}s.
   */
  @Override
  protected void decompose() {
    final int len = termAtt.length();

    // Rules are tried in the same priority order as the original chain.
    // NOTE(review): rule objects are created per call in case implementations
    // are stateful — confirm before hoisting to static finals.
    final sandhi[] rules = {
        new savarNa(), new guNa(), new yaN(), new vridhi(), new poorvaroopa(), new ayAdi()
    };

    String nw = null;
    for (sandhi rule : rules) {
      nw = rule.split(termAtt.buffer(), len, dictionary);
      if (nw != null) {
        break;
      }
    }
    if (nw == null) {
      // Last resort: strip a plural suffix.
      nw = new dePluralizer().dePluralize(termAtt.buffer(), len, dictionary);
    }
    if (nw == null) {
      return; // no rule applied; leave the term untouched
    }

    restoreState(current);
    clearAttributes();
    // copyBuffer also sets the term length, so no separate setLength is needed.
    termAtt.copyBuffer(nw.toCharArray(), 0, nw.length());
    current = captureState();

    final int ind = nw.indexOf(' ');
    if (ind >= 0) {
      tokens.add(new CompoundToken(0, ind));
      tokens.add(new CompoundToken(ind + 1, nw.length() - ind - 1));
    } else {
      // Defensive: a rule returned a result without a separator; previously
      // this produced a CompoundToken with negative length. Emit it whole.
      tokens.add(new CompoundToken(0, nw.length()));
    }
  }
}
