
package textmining.gpanalysis;
import Jama.Matrix;

/**
 *
 * @author ibrahimsabek
 */
public class IDFHandler {

    public IDFHandler() {
    }

  /**
   * Applies inverse-document-frequency (IDF) weighting to a raw term-frequency
   * matrix (rows = words, columns = documents) and then normalizes each
   * document column so its scores sum to 1.
   *
   * <p>Each non-zero frequency is scaled by {@code 1 + ln(n) - ln(df)}, where
   * {@code n} is the number of documents (columns) and {@code df} is the number
   * of documents containing the word (non-zero entries in the row).
   *
   * <p>NOTE: the input matrix is modified in place and returned.
   *
   * @param matrix raw word-frequency matrix; rows are words, columns are documents
   * @return the same matrix instance, IDF-weighted and column-normalized
   */
  public Matrix createIDFMatrix(Matrix matrix) {

    int numDocs = matrix.getColumnDimension();
    int numWords = matrix.getRowDimension();

    // Apply the IDF weight to the raw word frequencies. The document
    // frequency df is constant for a whole row, so compute it once per row
    // instead of once per non-zero cell (the original recomputation was
    // O(rows * cols * cols)). Hoisting is safe: the factor 1 + ln(n/df) is
    // >= 1 whenever df >= 1, so weighting never turns a positive entry into
    // zero and df cannot change mid-pass.
    for (int i = 0; i < numWords; i++) {
      double df = countDocsWithWord(matrix.getMatrix(i, i, 0, numDocs - 1));
      if (df == 0.0D) {
        continue; // word appears in no document: every cell is zero, nothing to weight
      }
      double idf = 1 + Math.log(numDocs) - Math.log(df);
      for (int j = 0; j < numDocs; j++) {
        double frequency = matrix.get(i, j);
        if (frequency > 0.0D) {
          matrix.set(i, j, frequency * idf);
        }
      }
    }

    // Normalize the word scores within each document (column) to sum to 1.
    for (int j = 0; j < numDocs; j++) {
      double sum = sum(matrix.getMatrix(0, numWords - 1, j, j));
      if (sum == 0.0D) {
        continue; // all-zero document: skip to avoid 0/0 producing NaN entries
      }
      for (int i = 0; i < numWords; i++) {
        matrix.set(i, j, matrix.get(i, j) / sum);
      }
    }
    return matrix;
  }

  /**
   * Sums the entries of the first column of the given matrix.
   *
   * @param colMatrix a single-column matrix (a document vector)
   * @return the sum of all entries in column 0
   */
  private double sum(Matrix colMatrix) {
    double sum = 0.0D;
    for (int i = 0; i < colMatrix.getRowDimension(); i++) {
      sum += colMatrix.get(i, 0);
    }
    return sum;
  }

  /**
   * Counts the documents containing a word, i.e. the non-zero entries in the
   * first row of the given matrix.
   *
   * @param rowMatrix a single-row matrix (one word's frequencies across documents)
   * @return the number of strictly positive entries in row 0
   */
  private double countDocsWithWord(Matrix rowMatrix) {
    double numDocs = 0.0D;
    for (int j = 0; j < rowMatrix.getColumnDimension(); j++) {
      if (rowMatrix.get(0, j) > 0.0D) {
        numDocs++;
      }
    }
    return numDocs;
  }

}
