package extractors;

import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import parser.ArticleParser;
import primitive.Article;

public class TFIDF implements SimilarityMeasure{

	/** Keyword vocabulary; every TF-IDF vector is indexed by this list. */
	List<String> list_words = new ArrayList<>();

	/** Corpus size used as the numerator of the IDF ratio. */
	int number_of_documents;

	public void setNumberOfDocuments(int value){
		number_of_documents = value;
	}

	/**
	 * Loads the keyword vocabulary from a file of space-separated terms
	 * (one or more per line), appending them to {@code list_words}.
	 *
	 * @param filename path of the keyword file
	 */
	public void loadKeyWords(String filename){
		// try-with-resources closes the reader on every path; the original
		// DataInputStream wrapper was redundant and leaked on exception.
		// UTF-8 is fixed explicitly so parsing does not depend on the
		// platform default charset.
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8))) {
			String line;
			while ((line = br.readLine()) != null) {
				for (String term : line.split(" ")) {
					list_words.add(term);
				}
			}
		} catch (Exception e) {
			System.out.println("Error: " + e.getMessage());
		}
	}

	/**
	 * Cosine similarity of the TF-IDF vectors of two articles, computed
	 * over the loaded keyword vocabulary.
	 *
	 * <p>Bug fix: the denominator is now the PRODUCT of the two vector
	 * norms (||a|| * ||b||), as cosine similarity requires — the previous
	 * code SUMMED the norms, deflating every score. A zero norm now yields
	 * 0.0 instead of NaN.
	 *
	 * @param art1 first article
	 * @param art2 second article
	 * @return cosine similarity; 0.0 when either vector has zero norm
	 */
	public double similarity(Article art1, Article art2){
		ArticleParser articleParser = new ArticleParser();

		List<List<String>> documents = new ArrayList<>();
		documents.add(tokenize(articleParser.remove_all(art1)));
		documents.add(tokenize(articleParser.remove_all(art2)));

		int dim = list_words.size();

		// IDF depends only on the term and the document pair, not on which
		// of the two vectors is being built — compute it once per term
		// instead of once per (document, term) as before.
		double[] idf = new double[dim];
		for (int i = 0; i < dim; ++i) {
			idf[i] = IDF(list_words.get(i), documents, number_of_documents, list_words);
		}

		double[][] tfidf = new double[2][dim];
		for (int d = 0; d < 2; ++d) {
			for (int i = 0; i < dim; ++i) {
				tfidf[d][i] = termFreq(list_words.get(i), documents.get(d), list_words) * idf[i];
			}
		}

		double dot = 0.0;
		double normSq1 = 0.0;
		double normSq2 = 0.0;
		for (int i = 0; i < dim; ++i) {
			dot += tfidf[0][i] * tfidf[1][i];
			normSq1 += tfidf[0][i] * tfidf[0][i];
			normSq2 += tfidf[1][i] * tfidf[1][i];
		}

		double denominator = Math.sqrt(normSq1) * Math.sqrt(normSq2);
		return denominator > 0.0 ? dot / denominator : 0.0;
	}

	/** Splits an article body on single spaces into a token list. */
	private List<String> tokenize(Article article){
		List<String> tokens = new ArrayList<>();
		for (String s : article.getBody().split(" ")) {
			tokens.add(s);
		}
		return tokens;
	}

	//---------------------

	/**
	 * Term frequency of {@code word} in {@code doc}, normalized by the
	 * number of doc tokens that belong to the vocabulary {@code terms}.
	 *
	 * @return count(word) / count(vocabulary tokens); 0.0 when no doc
	 *         token belongs to the vocabulary
	 */
	private double termFreq(String word, List<String> doc, List<String> terms){
		// HashSet membership turns the old O(|terms| * |doc|) nested scan
		// into O(|terms| + |doc|); it also stops duplicate vocabulary
		// entries from inflating the normalizer.
		Set<String> vocabulary = new HashSet<>(terms);

		double count = 0.0;
		double vocabTokens = 0.0;
		for (String s : doc) {
			if (s.equals(word)) {
				count += 1.0;
			}
			if (vocabulary.contains(s)) {
				vocabTokens += 1.0;
			}
		}

		return vocabTokens > 0 ? count / vocabTokens : 0.0;
	}

	/**
	 * Smoothed inverse document frequency: log(N / (df + 1)), where df is
	 * the number of documents in {@code list_of_doc} containing
	 * {@code word} and N is {@code number_of_documents}. The +1 avoids
	 * division by zero for terms absent from every document.
	 */
	private double IDF(String word, List<List<String>> list_of_doc, int number_of_documents, List<String> terms){
		double documentFrequency = 0.0;

		for (List<String> document : list_of_doc) {
			if (termFreq(word, document, terms) > 0.0) {
				documentFrequency += 1.0;
			}
		}

		return list_of_doc.isEmpty() ? 0.0
				: Math.log(number_of_documents / (documentFrequency + 1.0));
	}
}
