package core.proto.inforet;

import java.io.*;
import java.util.*;
import java.lang.Math;
import Jama.Matrix;

// look at http://math.nist.gov/javanumerics/jama/doc/
// for documentation on Matrix class from JAMA
public class similarityMatrix {

    // Maps each indexed term to its row in the term-document matrix.
    static TreeMap<String, Integer> index = new TreeMap<String, Integer>();
    // Words excluded from indexing, loaded from stoplist.txt (one per line).
    static LinkedList<String> stopList = new LinkedList<String>();
    static String[] fileNames;
    // All indexed terms in row order: allwords.get(r) is the term for row r.
    static ArrayList<String> allwords = new ArrayList<String>();
    // Distinct terms found common to at least one document pair.
    static ArrayList<String> commonwords = new ArrayList<String>();
    static int N; // number of documents, corpus size
    static int M; // number of indexed terms
    static Matrix termDoc; // M by N term-document frequency matrix
    static Matrix docSim;  // N by N document-similarity matrix
    static int position = 0; // next free row while building the index
    // Change this to your directory with all the corpus files
    static String direct = "/Users/veva/Documents/workspace/MEng Alone/test";
    // The stop list lives in the same directory as the corpus files
    static String stopListPath = direct.concat("/stoplist.txt");
    // Total number of distinct terms shared by at least one document pair
    static int totalCommonWords = 0;

    /**
     * Builds a term-document matrix over all files in {@code direct}
     * (excluding the stop list), computes pairwise document similarity,
     * and prints GraphML-style edge fragments to stdout.
     *
     * @param args unused
     * @throws IOException if the stop list or a corpus file cannot be read
     */
    public static void main(String[] args) throws IOException {
        // Load the stop list, one word per line; try-with-resources closes it.
        try (BufferedReader myInput = new BufferedReader(
                new InputStreamReader(new FileInputStream(stopListPath)))) {
            String stopWord = myInput.readLine();
            while (stopWord != null) {
                stopList.add(stopWord);
                stopWord = myInput.readLine();
            }
        }

        // Collect corpus files. BUGFIX: the old code assumed the stop list was
        // always files[0], but File.listFiles() order is unspecified — filter
        // the stop list out by name instead, and fail loudly on a bad directory.
        File directory = new File(direct);
        File[] listed = directory.listFiles();
        if (listed == null) {
            throw new IOException("Not a readable directory: " + direct);
        }
        ArrayList<File> docs = new ArrayList<File>();
        for (File f : listed) {
            if (f.isFile() && !f.getName().equals("stoplist.txt")) {
                docs.add(f);
            }
        }
        N = docs.size();
        fileNames = new String[N];

        // Pass 1: scan every document to build the term index.
        for (int i = 0; i < N; i++) {
            File f = docs.get(i);
            fileNames[i] = f.getName();
            try (Scanner scan = new Scanner(new FileInputStream(f))) {
                readWords(scan, f.getName(), i);
            }
        }
        M = index.size();

        // Step 1: create the term-document matrix (starts as all zeros).
        termDoc = new Matrix(M, N);
        docSim = new Matrix(N, N);

        // Pass 2: fill in per-document term frequencies.
        for (int i = 0; i < N; i++) {
            File f = docs.get(i);
            try (Scanner scan = new Scanner(new FileInputStream(f))) {
                fillMatrix(scan, f.getName(), i);
            }
        }
        // Step 2: convert to tf.idf weighting (disabled, as in the original)
        //tfidf();
        // Step 3: normalize (disabled, as in the original)
        //normalize();
        // Step 4: similarity for all documents
        similarity();

        // Count the distinct common words across all document pairs first;
        // giveInfo() divides by this total.
        for (int i = 0; i < N; i++) {
            for (int j = i + 1; j < N; j++) {
                totalCommonWords(i + 1, j + 1);
            }
        }
        System.out.println("TOTAL COMMON WORDS:" + totalCommonWords);
        // Emit the XML edge fragments for every document pair.
        for (int i = 0; i < N; i++) {
            for (int j = i + 1; j < N; j++) {
                giveInfo(i + 1, j + 1);
            }
        }
    }

    /**
     * Normalizes a raw token: lower-cases it and strips every non-letter
     * character. Returns "" for tokens that do not start with a letter
     * (numbers, punctuation, etc.), which callers treat as "skip".
     *
     * BUGFIX: readWords() and fillMatrix() previously used different cleanup
     * rules (strip at '.'/''' vs. remove all non-letters), so the index and
     * the frequency pass could disagree on the same token. Both now share
     * this single normalization.
     */
    private static String normalizeWord(String raw) {
        if (raw.isEmpty() || !Character.isLetter(raw.charAt(0))) {
            return "";
        }
        return raw.toLowerCase().replaceAll("[^a-z]", "");
    }

    /**
     * First pass: registers every new non-stop-list word in the index,
     * assigning it the next free matrix row.
     *
     * @param scan        scanner over one document's contents
     * @param currentFile name of the document (unused; kept for callers)
     * @param docNum      zero-based document number (unused; kept for callers)
     */
    public static void readWords(Scanner scan, String currentFile, int docNum) {
        while (scan.hasNext()) {
            String word = normalizeWord(scan.next());
            // Skip tokens that normalized to nothing, stop words, and
            // words already indexed.
            if (!word.isEmpty() && !stopList.contains(word) && !index.containsKey(word)) {
                index.put(word, position);
                position++;
                allwords.add(word);
            }
        }
    }

    /**
     * Second pass: increments the frequency cell (term row, document column)
     * for every indexed word occurrence in the document.
     *
     * @param scan        scanner over one document's contents
     * @param currentFile name of the document (unused; kept for callers)
     * @param docNum      zero-based column of this document in termDoc
     */
    public static void fillMatrix(Scanner scan, String currentFile, int docNum) {
        while (scan.hasNext()) {
            String word = normalizeWord(scan.next());
            if (!word.isEmpty() && !stopList.contains(word) && index.containsKey(word)) {
                int row = index.get(word);
                // Row is the word's position, column is the document;
                // increase the word's frequency by 1.
                termDoc.set(row, docNum, termDoc.get(row, docNum) + 1);
            }
        }
    }

    /**
     * Converts raw counts in termDoc to tf.idf weights in place.
     * The math follows Information Retrieval lecture 4:
     * http://www.infosci.cornell.edu/Courses/info4300/2010fa/syllabus.html
     */
    public static void tfidf() {
        for (int i = 0; i < M; i++) {
            int docFreq = 0;
            // Document frequency: number of documents containing term i.
            for (int j = 0; j < N; j++) {
                if (termDoc.get(i, j) > 0) {
                    docFreq++;
                }
            }
            if (docFreq == 0) {
                continue; // term occurs nowhere; row stays zero (avoids /0)
            }
            // idf is invariant across documents — hoisted out of the j loop.
            double idf = Math.log10(((double) N) / docFreq);
            for (int j = 0; j < N; j++) {
                double count = termDoc.get(i, j);
                double tf = (count > 0) ? Math.log10(count) + 1 : 0;
                termDoc.set(i, j, tf * idf);
            }
        }
    }

    /**
     * Normalizes each document column of termDoc to unit Euclidean length.
     * Columns that are entirely zero are left untouched (avoids 0/0 → NaN).
     */
    public static void normalize() {
        for (int j = 0; j < N; j++) {
            double sum = 0;
            for (int i = 0; i < M; i++) {
                sum += Math.pow(termDoc.get(i, j), 2);
            }
            sum = Math.sqrt(sum);
            if (sum == 0) {
                continue; // empty document column; nothing to scale
            }
            for (int i = 0; i < M; i++) {
                termDoc.set(i, j, termDoc.get(i, j) / sum);
            }
        }
    }

    /**
     * Fills docSim with pairwise dot-product similarity scores between
     * document columns of termDoc. The diagonal is set to 1.
     *
     * @throws FileNotFoundException never thrown; kept for caller compatibility
     */
    public static void similarity() throws FileNotFoundException {
        for (int i = 0; i < N; i++) {
            docSim.set(i, i, 1);
            Matrix d1 = termDoc.getMatrix(0, M - 1, i, i); // document column i
            for (int j = i + 1; j < N; j++) {
                Matrix d2 = termDoc.getMatrix(0, M - 1, j, j); // document column j
                Matrix dot = d1.arrayTimes(d2); // element-wise product, M x 1
                double score = 0;
                // BUGFIX: sum over all M term rows, not N documents — the old
                // bound k<N truncated the sum (or overran when N > M).
                for (int k = 0; k < M; k++) {
                    score += dot.get(k, 0);
                }
                docSim.set(i, j, score);
                docSim.set(j, i, score); // keep the matrix symmetric
            }
        }
    }

    /**
     * Prints a GraphML-style edge fragment for documents a and b (1-based),
     * weighted by their share of the corpus-wide common vocabulary.
     * Output is meant to be copy/pasted into an XML file.
     */
    public static void giveInfo(int a, int b) {
        Matrix d1 = termDoc.getMatrix(0, M - 1, a - 1, a - 1);
        Matrix d2 = termDoc.getMatrix(0, M - 1, b - 1, b - 1);
        StringBuilder common = new StringBuilder();
        int count = 0;
        // BUGFIX: iterate all M terms — the old bound i<M-1 skipped the last.
        for (int i = 0; i < M; i++) {
            if (d1.get(i, 0) > 0.0 && d2.get(i, 0) > 0.0) {
                count++;
                common.append(allwords.get(i)).append(",");
            }
        }
        if (totalCommonWords == 0) {
            return; // no shared vocabulary anywhere; avoid division by zero
        }
        int weight = count * 1000 / totalCommonWords;
        if (weight != 0) {
            System.out.println("<edge source=\"" + a + "\" target=\"" + b + "\">");
            // BUGFIX: removed the stray '\"' that corrupted the weight element
            System.out.println("<data key=\"weight\">" + weight + "</data>");
            System.out.println("<data key=\"attribute\">" + common + "</data>");
            System.out.println("</edge>");
        }
    }

    /**
     * Adds every term shared by documents a and b (1-based) to the global
     * commonwords list, counting each distinct term only once overall.
     */
    public static void totalCommonWords(int a, int b) {
        Matrix d1 = termDoc.getMatrix(0, M - 1, a - 1, a - 1);
        Matrix d2 = termDoc.getMatrix(0, M - 1, b - 1, b - 1);
        // BUGFIX: iterate all M terms — the old bound i<M-1 skipped the last.
        for (int i = 0; i < M; i++) {
            if (d1.get(i, 0) > 0.0 && d2.get(i, 0) > 0.0) {
                if (!commonwords.contains(allwords.get(i))) {
                    commonwords.add(allwords.get(i));
                    totalCommonWords++;
                }
            }
        }
    }

}
