package com.samp.solr.similarity;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SmallFloat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


/**
 * BM25-style {@link Similarity} used for experimenting with Solr scoring.
 *
 * <p>Structurally this mirrors Lucene's classic BM25Similarity (idf, length
 * normalization via a 256-entry decoded-norm cache, precomputed
 * freq-independent factor per norm value), with verbose debug logging at every
 * scoring hook so each callback's inputs can be traced.
 *
 * <p>Intentional experimental deviations from stock BM25 (left in place,
 * flagged for review):
 * <ul>
 *   <li>{@link #queryNorm(float)} returns a constant {@code 1000f} instead of
 *       the usual {@code 1f}, uniformly scaling all scores.</li>
 *   <li>{@code SampDocScorer#score} clamps {@code freq} to at most 1, i.e.
 *       term frequency contributes only as a binary signal.</li>
 * </ul>
 *
 * <p>Not thread-safety-annotated beyond what the base class requires; all
 * mutable state is the {@link #discountOverlaps} flag, expected to be set
 * once at configuration time.
 */
public class SampSimilarity extends Similarity {

  private static final Logger logger = LoggerFactory.getLogger(SampSimilarity.class);

  /** Marker prepended to every debug line so this similarity's output is easy to grep. */
  private static final String LOG_PREFIX = "========";

  /** Term-frequency saturation parameter (BM25 k1). */
  private final float k1;
  /** Length-normalization parameter (BM25 b), in [0..1]. */
  private final float b;

  /**
   * Creates the similarity with explicit BM25 free parameters.
   *
   * @param k1 controls non-linear term frequency normalization (saturation);
   *           must be a non-negative finite value
   * @param b  controls to what degree document length normalizes tf values;
   *           must be within {@code [0..1]}
   * @throws IllegalArgumentException if either parameter is out of range
   */
  public SampSimilarity(float k1, float b) {
    if (!Float.isFinite(k1) || k1 < 0) {
      throw new IllegalArgumentException("illegal k1 value: " + k1 + ", must be a non-negative finite value");
    }
    if (Float.isNaN(b) || b < 0 || b > 1) {
      throw new IllegalArgumentException("illegal b value: " + b + ", must be between 0 and 1");
    }
    this.k1 = k1;
    this.b = b;
  }

  /** Creates the similarity with the standard BM25 defaults: {@code k1 = 1.2}, {@code b = 0.75}. */
  public SampSimilarity() {
    this(1.2f, 0.75f);
  }

  /**
   * Coordination factor rewarding documents matching more query terms.
   * Implemented as the standard ratio <code>overlap / maxOverlap</code>.
   */
  @Override
  public float coord(int overlap, int maxOverlap) {
    logger.debug("{}coord overlap={}", LOG_PREFIX, overlap);
    logger.debug("{}coord maxOverlap={}", LOG_PREFIX, maxOverlap);
    return overlap / (float) maxOverlap;
  }

  /**
   * Constant query normalization.
   *
   * <p>NOTE(review): returns {@code 1000f} rather than BM25's customary
   * {@code 1f}; this scales every score by 1000 but does not change ranking.
   * Kept as-is since it appears to be a deliberate experiment — confirm
   * before relying on absolute score values.
   */
  @Override
  public float queryNorm(float valueForNormalization) {
    logger.debug("{}queryNorm valueForNormalization={}", LOG_PREFIX, valueForNormalization);
    return 1000f;
  }

  /** Implemented as <code>log(1 + (docCount - docFreq + 0.5)/(docFreq + 0.5))</code>. */
  protected float idf(long docFreq, long docCount) {
    logger.debug("{}idf docFreq={}", LOG_PREFIX, docFreq);
    logger.debug("{}idf docCount={}", LOG_PREFIX, docCount);
    return (float) Math.log(1 + (docCount - docFreq + 0.5D) / (docFreq + 0.5D));
  }

  /** Implemented as <code>1 / (distance + 1)</code>. */
  protected float sloppyFreq(int distance) {
    logger.debug("{}sloppyFreq distance={}", LOG_PREFIX, distance);
    return 1.0f / (distance + 1);
  }

  /** The default implementation returns <code>1</code> (payloads do not influence scoring). */
  protected float scorePayload(int doc, int start, int end, BytesRef payload) {
    return 1;
  }

  /**
   * The default implementation computes the average as
   * <code>sumTotalTermFreq / docCount</code>, or returns <code>1</code> if the
   * index does not store sumTotalTermFreq (any field that omits frequency
   * information).
   */
  protected float avgFieldLength(CollectionStatistics collectionStats) {
    logger.debug("{}avgFieldLength collectionStats start", LOG_PREFIX);
    logger.debug("{}avgFieldLength collectionStats field = {}", LOG_PREFIX, collectionStats.field());
    logger.debug("{}avgFieldLength collectionStats maxDoc = {}", LOG_PREFIX, collectionStats.maxDoc());
    logger.debug("{}avgFieldLength collectionStats docCount = {}", LOG_PREFIX, collectionStats.docCount());
    logger.debug("{}avgFieldLength collectionStats sumDocFreq = {}", LOG_PREFIX, collectionStats.sumDocFreq());
    logger.debug("{}avgFieldLength collectionStats sumTotalTermFreq = {}", LOG_PREFIX, collectionStats.sumTotalTermFreq());
    logger.debug("{}avgFieldLength collectionStats end", LOG_PREFIX);
    final long sumTotalTermFreq = collectionStats.sumTotalTermFreq();
    if (sumTotalTermFreq <= 0) {
      return 1f; // field does not exist, or stat is unsupported
    } else {
      // docCount == -1 means the stat is unavailable; fall back to maxDoc.
      final long docCount = collectionStats.docCount() == -1 ? collectionStats.maxDoc() : collectionStats.docCount();
      return (float) (sumTotalTermFreq / (double) docCount);
    }
  }

  /**
   * The default implementation encodes <code>boost / sqrt(length)</code>
   * with {@link SmallFloat#floatToByte315(float)}.  This is compatible with
   * Lucene's default implementation.  If you change this, then you should
   * change {@link #decodeNormValue(byte)} to match.
   */
  protected byte encodeNormValue(float boost, int fieldLength) {
    return SmallFloat.floatToByte315(boost / (float) Math.sqrt(fieldLength));
  }

  /**
   * The default implementation returns <code>1 / f<sup>2</sup></code>
   * where <code>f</code> is {@link SmallFloat#byte315ToFloat(byte)}.
   */
  protected float decodeNormValue(byte b) {
    return NORM_TABLE[b & 0xFF];
  }

  /**
   * True if overlap tokens (tokens with a position increment of zero) are
   * discounted from the document's length.
   */
  protected boolean discountOverlaps = true;

  /**
   * Sets whether overlap tokens (Tokens with 0 position increment) are
   * ignored when computing norm.  By default this is true, meaning overlap
   * tokens do not count when computing norms.
   */
  public void setDiscountOverlaps(boolean v) {
    discountOverlaps = v;
  }

  /**
   * Returns true if overlap tokens are discounted from the document's length.
   * @see #setDiscountOverlaps
   */
  public boolean getDiscountOverlaps() {
    return discountOverlaps;
  }

  /** Cache of decoded bytes: NORM_TABLE[i] == 1 / byte315ToFloat(i)^2. */
  private static final float[] NORM_TABLE = new float[256];

  static {
    for (int i = 1; i < 256; i++) {
      float f = SmallFloat.byte315ToFloat((byte) i);
      NORM_TABLE[i] = 1.0f / (f * f);
    }
    NORM_TABLE[0] = 1.0f / NORM_TABLE[255]; // otherwise inf
  }

  /**
   * Encodes the per-document norm as {@code boost / sqrt(length)}, optionally
   * discounting overlap tokens from the length (see {@link #discountOverlaps}).
   */
  @Override
  public final long computeNorm(FieldInvertState state) {
    logger.debug("{}computeNorm FieldInvertState start", LOG_PREFIX);
    logger.debug("{}computeNorm FieldInvertState getBoost:{}", LOG_PREFIX, state.getBoost());
    logger.debug("{}computeNorm FieldInvertState getLength:{}", LOG_PREFIX, state.getLength());
    logger.debug("{}computeNorm FieldInvertState getMaxTermFrequency:{}", LOG_PREFIX, state.getMaxTermFrequency());
    logger.debug("{}computeNorm FieldInvertState getName:{}", LOG_PREFIX, state.getName());
    logger.debug("{}computeNorm FieldInvertState getNumOverlap:{}", LOG_PREFIX, state.getNumOverlap());
    logger.debug("{}computeNorm FieldInvertState getOffset:{}", LOG_PREFIX, state.getOffset());
    logger.debug("{}computeNorm FieldInvertState getPosition:{}", LOG_PREFIX, state.getPosition());
    logger.debug("{}computeNorm FieldInvertState getUniqueTermCount:{}", LOG_PREFIX, state.getUniqueTermCount());
    logger.debug("{}computeNorm FieldInvertState getAttributeSource:{}", LOG_PREFIX, state.getAttributeSource());
    logger.debug("{}computeNorm FieldInvertState end", LOG_PREFIX);
    final int numTerms = discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength();
    // Encode once; the original computed this twice (once to log, once to return).
    final byte norm = encodeNormValue(state.getBoost(), numTerms);
    logger.debug("{}computeNorm result{}", LOG_PREFIX, norm);
    return norm;
  }

  /**
   * Computes a score factor for a simple term and returns an explanation
   * for that score factor.
   *
   * <p>
   * The default implementation uses:
   *
   * <pre class="prettyprint">
   * idf(docFreq, docCount);
   * </pre>
   *
   * Note that {@link CollectionStatistics#docCount()} is used instead of
   * {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because also
   * {@link TermStatistics#docFreq()} is used, and when the latter
   * is inaccurate, so is {@link CollectionStatistics#docCount()}, and in the same direction.
   * In addition, {@link CollectionStatistics#docCount()} does not skew when fields are sparse.
   *
   * @param collectionStats collection-level statistics
   * @param termStats term-level statistics for the term
   * @return an Explain object that includes both an idf score factor
   *         and an explanation for the term.
   */
  public Explanation idfExplain(CollectionStatistics collectionStats, TermStatistics termStats) {
    final long df = termStats.docFreq();
    final long docCount = collectionStats.docCount() == -1 ? collectionStats.maxDoc() : collectionStats.docCount();
    final float idf = idf(df, docCount);
    return Explanation.match(idf, "idf, computed as log(1 + (docCount - docFreq + 0.5) / (docFreq + 0.5)) from:",
        Explanation.match(df, "docFreq"),
        Explanation.match(docCount, "docCount"));
  }

  /**
   * Computes a score factor for a phrase.
   *
   * <p>
   * The default implementation sums the idf factor for
   * each term in the phrase.
   *
   * @param collectionStats collection-level statistics
   * @param termStats term-level statistics for the terms in the phrase
   * @return an Explain object that includes both an idf
   *         score factor for the phrase and an explanation
   *         for each term.
   */
  public Explanation idfExplain(CollectionStatistics collectionStats, TermStatistics termStats[]) {
    double idf = 0d; // sum into a double before casting into a float
    List<Explanation> details = new ArrayList<>();
    for (final TermStatistics stat : termStats) {
      Explanation idfExplain = idfExplain(collectionStats, stat);
      details.add(idfExplain);
      idf += idfExplain.getValue();
    }
    return Explanation.match((float) idf, "idf(), sum of:", details);
  }

  /**
   * Builds the query-time weight: the (summed) idf explanation plus a
   * 256-entry cache of the freq-independent BM25 factor
   * <code>k1 * ((1 - b) + b * dl / avgdl)</code>, one entry per possible
   * encoded norm byte.
   */
  @Override
  public final SimWeight computeWeight(CollectionStatistics collectionStats, TermStatistics... termStats) {
    logger.debug("{}computeWeight CollectionStatistics start", LOG_PREFIX);
    logger.debug("{}computeWeight CollectionStatistics field{}", LOG_PREFIX, collectionStats.field());
    logger.debug("{}computeWeight CollectionStatistics maxDoc{}", LOG_PREFIX, collectionStats.maxDoc());
    logger.debug("{}computeWeight CollectionStatistics sumDocFreq{}", LOG_PREFIX, collectionStats.sumDocFreq());
    logger.debug("{}computeWeight CollectionStatistics sumTotalTermFreq{}", LOG_PREFIX, collectionStats.sumTotalTermFreq());
    logger.debug("{}computeWeight CollectionStatistics docCount{}", LOG_PREFIX, collectionStats.docCount());
    logger.debug("{}computeWeight CollectionStatistics end", LOG_PREFIX);
    for (int i = 0; i < termStats.length; i++) {
      TermStatistics termStat = termStats[i];
      logger.debug("{}computeWeight TermStatistics start, i={}", LOG_PREFIX, i);
      logger.debug("{}computeWeight TermStatistics docFreq{}", LOG_PREFIX, termStat.docFreq());
      logger.debug("{}computeWeight TermStatistics totalTermFreq{}", LOG_PREFIX, termStat.totalTermFreq());
      logger.debug("{}computeWeight TermStatistics term utf8ToString {}", LOG_PREFIX, termStat.term().utf8ToString());
      logger.debug("{}computeWeight TermStatistics end", LOG_PREFIX);
    }
    Explanation idf = termStats.length == 1
        ? idfExplain(collectionStats, termStats[0])
        : idfExplain(collectionStats, termStats);

    float avgdl = avgFieldLength(collectionStats);

    // compute freq-independent part of bm25 equation across all norm values
    float cache[] = new float[256];
    for (int i = 0; i < cache.length; i++) {
      // Decode once per entry; the original decoded twice (once to log, once to use).
      final float decoded = decodeNormValue((byte) i);
      logger.debug("{}computeWeight decodeNormValue {}", LOG_PREFIX, decoded);
      logger.debug("{}computeWeight avgdl {}", LOG_PREFIX, avgdl);
      cache[i] = k1 * ((1 - b) + b * decoded / avgdl);
      logger.debug("{}computeWeight cache[{}] {}", LOG_PREFIX, i, cache[i]);
    }
    return new SampStats(collectionStats.field(), idf, avgdl, cache);
  }

  /** Binds the precomputed weight to a segment's norms to produce a per-document scorer. */
  @Override
  public final SimScorer simScorer(SimWeight stats, LeafReaderContext context) throws IOException {
    SampStats bm25stats = (SampStats) stats;
    logger.debug("{}LeafReaderContext reader = {}", LOG_PREFIX, context.reader().getClass());
    return new SampDocScorer(bm25stats, context.reader().getNormValues(bm25stats.field));
  }

  /** Per-segment scorer; reads this similarity's k1 and the precomputed norm cache. */
  private class SampDocScorer extends SimScorer {
    private final SampStats stats;
    private final float weightValue; // boost * idf * (k1 + 1)
    private final NumericDocValues norms;
    private final float[] cache;

    SampDocScorer(SampStats stats, NumericDocValues norms) throws IOException {
      this.stats = stats;
      this.weightValue = stats.weight * (k1 + 1);
      this.cache = stats.cache;
      this.norms = norms;
    }

    /**
     * BM25-shaped score: {@code weightValue * freq / (freq + norm)}.
     *
     * <p>NOTE(review): {@code freq} is clamped to at most 1 before scoring, so
     * term frequency acts as a binary signal — this deliberately deviates from
     * standard BM25; confirm it is still intended.
     */
    @Override
    public float score(int doc, float freq) {
      if (freq > 1) {
        freq = 1;
      }
      // if there are no norms, we act as if b=0
      float norm = norms == null ? k1 : cache[(byte) norms.get(doc) & 0xFF];
      logger.debug("{}norms 2 ={}", LOG_PREFIX, norm);
      logger.debug("{}doc={},score={}", LOG_PREFIX, doc, weightValue * freq / (freq + norm));
      logger.debug("{}{}doc={},weightValue={}", LOG_PREFIX, LOG_PREFIX, doc, weightValue);
      logger.debug("{}{}doc={},freq={}", LOG_PREFIX, LOG_PREFIX, doc, freq);
      logger.debug("{}{}doc={},norm={}", LOG_PREFIX, LOG_PREFIX, doc, norm);
      return weightValue * freq / (freq + norm);
    }

    @Override
    public Explanation explain(int doc, Explanation freq) {
      return explainScore(doc, freq, stats, norms);
    }

    /**
     * Slop factor for sloppy phrase matching.
     *
     * <p>Fixed: the original contained leftover debug code that
     * unconditionally threw a {@code RuntimeException}, breaking every
     * sloppy/phrase query scored with this similarity.
     */
    @Override
    public float computeSlopFactor(int distance) {
      return sloppyFreq(distance);
    }

    @Override
    public float computePayloadFactor(int doc, int start, int end, BytesRef payload) {
      return scorePayload(doc, start, end, payload);
    }
  }

  /** Collection statistics for the BM25 model. */
  private static class SampStats extends SimWeight {
    /** BM25's idf */
    private final Explanation idf;
    /** The average document length. */
    private final float avgdl;
    /** query boost */
    private float boost;
    /** weight (idf * boost) */
    private float weight;
    /** field name, for pulling norms */
    private final String field;
    /** precomputed norm[256] with k1 * ((1 - b) + b * dl / avgdl) */
    private final float cache[];

    SampStats(String field, Explanation idf, float avgdl, float cache[]) {
      this.field = field;
      this.idf = idf;
      this.avgdl = avgdl;
      this.cache = cache;
      normalize(1f, 1f);
    }

    @Override
    public float getValueForNormalization() {
      // we return a TF-IDF like normalization to be nice, but we don't actually normalize ourselves.
      return weight * weight;
    }

    @Override
    public void normalize(float queryNorm, float boost) {
      // we don't normalize with queryNorm at all, we just capture the top-level boost
      this.boost = boost;
      this.weight = idf.getValue() * boost;
    }
  }

  /** Explains the tf-normalization part of the score for a single document. */
  private Explanation explainTFNorm(int doc, Explanation freq, SampStats stats, NumericDocValues norms) {
    List<Explanation> subs = new ArrayList<>();
    subs.add(freq);
    subs.add(Explanation.match(k1, "parameter k1"));
    if (norms == null) {
      subs.add(Explanation.match(0, "parameter b (norms omitted for field)"));
      return Explanation.match(
          (freq.getValue() * (k1 + 1)) / (freq.getValue() + k1),
          "tfNorm, computed as (freq * (k1 + 1)) / (freq + k1) from:", subs);
    } else {
      float doclen = decodeNormValue((byte) norms.get(doc));
      subs.add(Explanation.match(b, "parameter b"));
      subs.add(Explanation.match(stats.avgdl, "avgFieldLength"));
      subs.add(Explanation.match(doclen, "fieldLength"));
      return Explanation.match(
          (freq.getValue() * (k1 + 1)) / (freq.getValue() + k1 * (1 - b + b * doclen / stats.avgdl)),
          "tfNorm, computed as (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength)) from:", subs);
    }
  }

  /** Explains the full score as the product of boost, idf and tfNorm. */
  private Explanation explainScore(int doc, Explanation freq, SampStats stats, NumericDocValues norms) {
    Explanation boostExpl = Explanation.match(stats.boost, "boost");
    List<Explanation> subs = new ArrayList<>();
    if (boostExpl.getValue() != 1.0f)
      subs.add(boostExpl);
    subs.add(stats.idf);
    Explanation tfNormExpl = explainTFNorm(doc, freq, stats, norms);
    subs.add(tfNormExpl);
    return Explanation.match(
        boostExpl.getValue() * stats.idf.getValue() * tfNormExpl.getValue(),
        "score(doc=" + doc + ",freq=" + freq + "), product of:", subs);
  }

  @Override
  public String toString() {
    return "Samp(k1=" + k1 + ",b=" + b + ")";
  }

  /**
   * Returns the <code>k1</code> parameter
   * @see #SampSimilarity(float, float)
   */
  public final float getK1() {
    return k1;
  }

  /**
   * Returns the <code>b</code> parameter
   * @see #SampSimilarity(float, float)
   */
  public final float getB() {
    return b;
  }

}
