package business.application.test_bed;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;

import business.application.*;//.TFCalculatorNoDB.TF_DOC;
import business.application.TFCalculatorNoDB.TF_DOC;
import business.search_eng.Document;
import business.search_eng.PostingList;
import business.search_eng.PostingListItem;
import business.search_eng.cleaner.EnglishStemmer;


import com.mysql.jdbc.ResultSet;
import com.mysql.jdbc.Statement;

/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */

/**
 *
 * @author marcgonzalez
 */
public class DBServerStandAlone {

    /** Posting lists keyed by term (word -> posting list). */
    public HashMap<String, PostingList> vocabulary;
    /** Raw (unstemmed) documents keyed by "id//&lt;id&gt;". */
    public HashMap<String, Document> collection;
    /** When true, restrict the SQL load to the 'life'/'spam' topics (set by a negative numOfFiles). */
    boolean useThisQuery = false;

    private int numOfFiles = 32; //0: NO LIMIT
    /** Stemmed copies of the loaded documents, queued for TF analysis. */
    private ArrayList<Document> documentList;

    /**
     * Loads the news collection from MySQL and builds the in-memory
     * vocabulary (inverted index) from it.
     *
     * @param numOfFiles maximum number of rows to load; {@code 0} means
     *                   "no limit" (capped at 20100); a negative value
     *                   additionally restricts the query to the
     *                   'life'/'spam' topics via {@link #useThisQuery}.
     */
    public DBServerStandAlone(int numOfFiles) {

        this.numOfFiles = numOfFiles;
        if (numOfFiles <= 0) {
            if (numOfFiles < 0) {
                useThisQuery = true;
            }
            // Treat "no limit" as the known collection size.
            this.numOfFiles = 20100;
        }

        vocabulary = new HashMap<String, PostingList>();
        collection = new HashMap<String, Document>();
        documentList = new ArrayList<Document>();

        try {
            // FIRST: fill the overlay with the collection.
            fillOverlayNetworkWithCollection();
            // SECOND: fill the overlay with the posting lists (word -> posting list).
            fillOverlayNetworkWithVocabulary(true);
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }

    /**
     * Reads up to {@code numOfFiles} rows from {@code tfcmargon.news_rawtext},
     * stores each raw document in {@link #collection} and queues a stemmed
     * copy in {@link #documentList} for later TF analysis.
     *
     * All JDBC resources (connection, statement, result set) are released in
     * the {@code finally} block so they cannot leak if the query or the row
     * loop throws. The original code closed the connection inside the
     * {@code try}, leaking it on any SQLException.
     */
    private void fillOverlayNetworkWithCollection() {

        EnglishStemmer stemmer = new EnglishStemmer();

        Connection conn = null;
        java.sql.Statement stmt = null;
        java.sql.ResultSet srs = null;

        try {
            Class.forName("com.mysql.jdbc.Driver").newInstance();
            // NOTE(review): credentials are hard-coded in the URL — move to configuration.
            conn = DriverManager.getConnection("jdbc:mysql://localhost/tfcmargon?" +
                    "user=zasca&password=zasca2009");

            // createStatement() already returns the portable java.sql.Statement;
            // the vendor-class casts in the original were unnecessary.
            stmt = conn.createStatement();
            if (!useThisQuery) {
                srs = stmt.executeQuery("SELECT id, title, subtitle, author, content, url, date, topic FROM tfcmargon.news_rawtext");
            } else {
                System.out.println("news and life");
                srs = stmt.executeQuery("SELECT id, title, subtitle, author, content, url, date, topic FROM tfcmargon.news_rawtext WHERE topic like 'life'" +
                        " or topic like 'spam'");
            }

            // Cross the whole result set (now files) up to the configured limit.
            int i = 0;
            System.out.println("Adding files to the DHT");
            while (srs.next() && i < numOfFiles) {
                i++;
                if (i % 100 == 0) {
                    System.out.println(i + " of " + numOfFiles + " done.");
                }

                // Retrieve all fields of the current row.
                int id = srs.getInt("id");
                if (id == 20001) {
                    // presumably the first spam row in the fixture data — TODO confirm
                    System.out.println("SPAM");
                }

                String title = srs.getString("title");
                String subtitle = srs.getString("subtitle");
                String content = srs.getString("content");
                String author = srs.getString("author");
                String url = srs.getString("url");
                String date = srs.getString("date");
                String topic = srs.getString("topic");

                // Insert data into the DHT: raw copy for retrieval, stemmed copy for indexing.
                Document document = new Document(id, title, subtitle, author, content, url, date, topic);
                Document parsed = new Document(id, stemmer.Stemm(title), stemmer.Stemm(subtitle), author, stemmer.Stemm(content), url, date, topic);

                documentList.add(parsed);
                collection.put("id//" + String.valueOf(id), document);
            }

        } catch (InstantiationException e) {
            e.printStackTrace();
        } catch (IllegalAccessException e) {
            e.printStackTrace();
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        } catch (SQLException e) {
            e.printStackTrace();
        } finally {
            // Release resources in reverse order of creation; failures on
            // close are deliberately ignored (nothing useful to do here).
            if (srs != null) {
                try {
                    srs.close();
                } catch (SQLException ignored) { }
            }
            if (stmt != null) {
                try {
                    stmt.close();
                } catch (SQLException ignored) { }
            }
            if (conn != null) {
                try {
                    conn.close();
                } catch (SQLException ignored) { }
            }
        }
    }

    /**
     * Computes term frequencies for every stemmed document, (de)serializes the
     * inverted index, and converts it into {@link PostingList}s stored in
     * {@link #vocabulary}.
     *
     * @param serialize when {@code true}, analyse {@link #documentList} and
     *                  write the resulting inverted index to
     *                  {@code invindex.idx}; when {@code false}, read a
     *                  previously saved index from that file instead.
     */
    private void fillOverlayNetworkWithVocabulary(boolean serialize) {
        TFCalculatorNoDB tfCalculator = TFCalculatorNoDB.INSTANCE;

        if (serialize) {

            Iterator<Document> itDoc = documentList.iterator();
            int done = 0;
            System.out.println("Analysing TF of the docs");
            while (itDoc.hasNext()) {
                if (done % 100 == 0) {
                    System.out.println(done + " of " + numOfFiles + " done.");
                }
                Document tempDoc = itDoc.next();
                // Calculate the term frequencies of this document.
                tfCalculator.fillDocumentDictionary(tempDoc.id, tempDoc.getFieldsNames(), tempDoc.getFieldsContents());
                done++;
            }

            // Persist the inverted index; the stream is closed in 'finally'
            // so it cannot leak when writeObject fails.
            ObjectOutputStream out = null;
            try {
                out = new ObjectOutputStream(new FileOutputStream("invindex.idx"));
                out.writeObject(tfCalculator.invertedIndex);
            } catch (IOException ex) {
                ex.printStackTrace();
            } finally {
                if (out != null) {
                    try {
                        out.close();
                    } catch (IOException ignored) { }
                }
            }
        } else {
            // Load a previously serialized inverted index. NOTE(review):
            // native deserialization of an on-disk file — only safe while the
            // file is produced by this same application.
            ObjectInputStream in = null;
            try {
                in = new ObjectInputStream(new FileInputStream("invindex.idx"));
                tfCalculator.invertedIndex = (HashMap) in.readObject();
            } catch (IOException ex) {
                ex.printStackTrace();
            } catch (ClassNotFoundException ex) {
                ex.printStackTrace();
            } finally {
                if (in != null) {
                    try {
                        in.close();
                    } catch (IOException ignored) { }
                }
            }
        }

        int i = 0;
        System.out.println("Adding references to the DHT");
        Iterator<String> bagOfWord = tfCalculator.invertedIndex.keySet().iterator();
        while (bagOfWord.hasNext()) {

            if (i % 100 == 0) {
                System.out.println(i + " of " + tfCalculator.invertedIndex.keySet().size() + " done.");
            }

            String mainTerm = bagOfWord.next();
            if (!TFCalculatorNoDB.isPunctuation(mainTerm)) {
                PostingList postingList = new PostingList(mainTerm);
                Iterator<TF_DOC> tf_doc_s = tfCalculator.invertedIndex.get(mainTerm).iterator();
                while (tf_doc_s.hasNext()) {
                    TF_DOC temp_tf_doc = tf_doc_s.next();

                    // BUGFIX: the original reused ONE map for every item of the
                    // term and overwrote "reputation" each iteration, so all
                    // items of a term ended up sharing the LAST document's
                    // reputation. Give each posting-list item its own map.
                    HashMap<String, Double> fieldsValues = new HashMap<String, Double>();
                    if (isSpam(temp_tf_doc.id)) {
                        fieldsValues.put("reputation", -5.0);
                    } else {
                        fieldsValues.put("reputation", 1.0);
                    }

                    PostingListItem item = new PostingListItem("id//" + temp_tf_doc.id, temp_tf_doc.termFreqs, fieldsValues);
                    postingList.addDocument(item);
                }

                vocabulary.put(postingList.word, postingList);
            }
            i++;
        }
    }

    /**
     * @param id document id as stored in {@link #collection} under "id//&lt;id&gt;"
     * @return {@code true} when the stored document's author field equals "spam"
     */
    private boolean isSpam(Integer id) {
        // NOTE(review): throws NullPointerException if the id was never loaded
        // into 'collection' — same behavior as the original code.
        return collection.get("id//" + String.valueOf(id)).author.equals("spam");
    }

}

