package mak.twitterSentiment.classes;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.List;

import javax.servlet.http.HttpServlet;

import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;

import com.google.gson.Gson;

import weka.attributeSelection.InfoGainAttributeEval;
import weka.attributeSelection.Ranker;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.functions.Logistic;
import weka.classifiers.functions.SMO;
import weka.classifiers.meta.FilteredClassifier;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.Filter;
import weka.filters.supervised.attribute.AttributeSelection;
import weka.filters.unsupervised.attribute.StringToWordVector;

//Class for searching tweets and giving predictions for them.
//Flow: fetch tweet texts (either from the Twitter search API or from a stored
//test-set file), preprocess them, write a Weka ARFF test file, classify every
//tweet with a pre-trained model and forward the results to results.jsp.
public class QuerySearch extends HttpServlet{

	/**
	 * Handles the search request: builds the test set, classifies every tweet
	 * and forwards a list of {@code TweetBean}s to results.jsp.
	 *
	 * Request parameters:
	 *  - "term":    the term to search Twitter for (used when "testSet" is empty)
	 *  - "testSet": file name of a ready-made test set under the webapp root
	 */
	protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
		ConnectionToDB c = new ConnectionToDB();
		c.connect();
		String path = getServletConfig().getServletContext().getRealPath("");
		response.setContentType("text/html; charset=UTF-8");
		HttpSession session = request.getSession(true);
		Preprocessing pre = new Preprocessing(path, c);
		// Raw tweet texts and their preprocessed counterparts, index-aligned
		// with the instances written to test.arff.
		ArrayList<String> originalTexts = new ArrayList<String>();
		ArrayList<Tweet> preprocessed = new ArrayList<Tweet>();
		try{
			String testSet = request.getParameter("testSet");
			session.setAttribute("termInJSP", "");
			List<String> rawLines;
			// BUG FIX: the original compared with reference equality
			// (testSet == ""), which is effectively never true and NPEs when
			// the parameter is absent; use a null/empty check instead.
			if(testSet == null || testSet.isEmpty()){
				String termInJSP = request.getParameter("term"); //the term to search for
				// The container decoded the query string as ISO-8859-1; re-decode as UTF-8.
				termInJSP = new String(termInJSP.getBytes("ISO-8859-1"), "UTF-8");
				session.setAttribute("termInJSP", termInJSP);
				rawLines = fetchTweets(termInJSP);
			}
			else{
				//Same procedure for the ready test sets
				rawLines = readTestSetLines(path + "/" + testSet);
			}
			writeTestArff(path, pre, rawLines, originalTexts, preprocessed);
			classifyAndForward(request, response, session, path, originalTexts, preprocessed);
		}catch(ServletException e){
			throw e;
		}catch(IOException e){
			throw e;
		}catch(Exception e){
			// The original swallowed every exception (silent blank page);
			// surface it to the container instead.
			throw new ServletException("Tweet search/classification failed", e);
		}finally{
			// Always release the DB connection (the original leaked it on error).
			ConnectionToDB.close(c.getConnection());
		}
	}

	/**
	 * Queries the Twitter search API for {@code term} and returns the raw
	 * texts of the returned tweets.
	 */
	private List<String> fetchTweets(String term) throws Exception {
		String query = term.replace("#", "%23");
		String termURL = "http://search.twitter.com/search.json?lang=el&rpp=500&q=" + query + "+exclude:retweets"; //search api url
		URL url = new URL(termURL);
		// Round-trip through URI to percent-encode any non-ASCII characters.
		URI uri = new URI(url.getProtocol(), url.getUserInfo(), url.getHost(), url.getPort(), url.getPath(), url.getQuery(), url.getRef());
		url = new URL(uri.toASCIIString());
		HttpURLConnection connection = (HttpURLConnection) url.openConnection();
		connection.setRequestMethod("GET");
		InputStream json = connection.getInputStream();
		List<String> texts = new ArrayList<String>();
		try{
			// Twitter serves UTF-8; the original used the platform default charset.
			Reader reader = new InputStreamReader(json, "UTF-8");
			SearchResponse responseJson = new Gson().fromJson(reader, SearchResponse.class);
			for(Result result : responseJson.results){		//returned tweets
				texts.add(result.text);
			}
		}finally{
			json.close();
		}
		return texts;
	}

	/** Reads a stored test-set file (one tweet per line, UTF-8). */
	private List<String> readTestSetLines(String filePath) throws IOException {
		List<String> lines = new ArrayList<String>();
		BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(filePath), "UTF-8"));
		try{
			String line;
			while ((line = br.readLine()) != null){
				lines.add(line);
			}
		}finally{
			br.close();		// the original never closed this reader
		}
		return lines;
	}

	/**
	 * Preprocesses every line that is not a retweet and writes the Weka ARFF
	 * test file. Fills {@code originalTexts} and {@code preprocessed} in step
	 * with the instances written, so instance i of test.arff corresponds to
	 * index i of both lists. Every instance is written with class "positive" —
	 * a placeholder; the real class is predicted later.
	 */
	private void writeTestArff(String path, Preprocessing pre, List<String> rawLines,
			List<String> originalTexts, List<Tweet> preprocessed) throws Exception {
		// NOTE(review): FileWriter uses the platform default charset (original
		// behavior kept); consider an explicit UTF-8 writer if Greek text is
		// garbled on non-UTF-8 servers.
		BufferedWriter out = new BufferedWriter(new FileWriter(path + "/test.arff"));
		try{
			out.write("@RELATION greek" + "\n");
			out.write("@ATTRIBUTE class {positive, negative, neutral}" + "\n");
			out.write("@ATTRIBUTE text STRING" + "\n");
			out.write("@DATA" + "\n");
			for(String text : rawLines){
				//every tweet that doesn't start with RT gets in the test set
				if(text.startsWith("RT")){
					continue;
				}
				originalTexts.add(text);
				PreprocessingResult preResult = pre.stemWords(text); //preprocessing of the tweet
				Tweet tweet = new Tweet();
				tweet.setText(preResult.getSentence());					//set the text of the tweet
				tweet.setNegationToken(preResult.getNegation());		//set if there is negation in the tweet
				tweet.setTokens_stem(preResult.getStems());				//set the tokens of the tweet after stemming
				tweet.setPos(preResult.getPos());						//set the parts of speech of the tweet
				preprocessed.add(tweet);
				out.write("positive,\"" + tweet.getText() + "\"" + "\n");	//write tweet to test set
			}
		}finally{
			out.close();	// close even on error (the original leaked on exception)
		}
	}

	/**
	 * Loads the trained model, predicts a class for every test instance,
	 * packs each tweet's result into a {@code TweetBean} and forwards the
	 * beans to results.jsp.
	 */
	private void classifyAndForward(HttpServletRequest request, HttpServletResponse response,
			HttpSession session, String path, List<String> originalTexts,
			List<Tweet> preprocessed) throws Exception {
		//Classification process
		DataSource trainSource = new DataSource(path + "/training_feat.arff");	//get the training set
		Instances train = trainSource.getDataSet();
		train.setClassIndex(0);
		DataSource testSource = new DataSource(path + "/test.arff");	//get the test set
		Instances test = testSource.getDataSet();
		test.setClassIndex(0);

		FilteredClassifier cls = (FilteredClassifier) weka.core.SerializationHelper.read(path + "/classifier.model");	//load model
		Evaluation evaluation = new Evaluation(test);

		ArrayList<TweetBean> beans = new ArrayList<TweetBean>();	//one bean for every tweet
		ArrayList<String> tweetTexts = new ArrayList<String>();
		for (int i = 0; i < test.numInstances(); i++) {
			ArrayList<String> features = new ArrayList<String>();
			ArrayList<String> featurePos = new ArrayList<String>();
			TweetBean bean = new TweetBean();

			double pred = evaluation.evaluateModelOnce(cls, test.instance(i));	//evaluate model for every tweet

			if(pred == 0){			//positive class
				bean.setClass1("predicted_positive");
				bean.setClass2("twtpositive");
				pred = collectFeatures(train, preprocessed.get(i), bean, features, featurePos, pred, true);
			}
			else if(pred == 1){		//negative class
				bean.setClass1("predicted_negative");
				bean.setClass2("twtnegative");
				pred = collectFeatures(train, preprocessed.get(i), bean, features, featurePos, pred, true);
			}
			else if(pred == 2){		//neutral class: no polarity to reverse
				bean.setClass1("predicted_neutral");
				bean.setClass2("twtneutral");
				pred = collectFeatures(train, preprocessed.get(i), bean, features, featurePos, pred, false);
			}

			//Store all the information of the tweet to a bean
			bean.setFeatures(features);
			bean.setPred(test.classAttribute().value((int) pred));
			bean.setText(originalTexts.get(i));
			tweetTexts.add(originalTexts.get(i));
			beans.add(bean);
		}

		// Set attributes BEFORE forwarding (the original set the session
		// attribute after forward()).
		session.setAttribute("tweetTexts", tweetTexts);
		request.setAttribute("beans", beans);
		request.getRequestDispatcher("results.jsp").forward(request, response);
	}

	/**
	 * Finds up to two training-set attributes that occur among the tweet's
	 * stemmed tokens — the features the prediction is based on — and records
	 * their parts of speech on the bean. If {@code reverseOnNegation} is set
	 * and the tweet's negation token is itself a training attribute, the
	 * polarity is flipped (positive &lt;-&gt; negative) and the bean's display
	 * classes are updated accordingly.
	 *
	 * @return the (possibly flipped) prediction index
	 */
	private double collectFeatures(Instances train, Tweet tweet, TweetBean bean,
			List<String> features, List<String> featurePos, double pred, boolean reverseOnNegation) {
		int countFeat = 0;
		for(int a = 0; a < train.numAttributes() - 1; a++){
			String attrName = train.attribute(a).name();
			for(int t = 0; t < tweet.getTokens_stem().size(); t++){
				if(attrName.equals(tweet.getTokens_stem().get(t)) && (countFeat < 2)){
					countFeat++;
					features.add(attrName);
					featurePos.add(tweet.getPos().get(t).getCategory());
				}
			}

			//Reverse polarity if there is negation in the tweet
			if(reverseOnNegation && tweet.getNegationToken().equals(attrName)){
				//If it is positive, make it negative
				if(pred == 0){
					bean.setClass1("predicted_negative");
					bean.setClass2("twtnegative");
					pred = 1;
				}
				//If it is negative, make it positive
				else if(pred == 1){
					bean.setClass1("predicted_positive");
					bean.setClass2("twtpositive");
					pred = 0;
				}
			}
		}
		bean.setParts_of_speech(featurePos);
		return pred;
	}

}
