package mak.twitterSentiment.classes;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;

import com.google.gson.Gson;

import gr.aueb.cs.nlp.postagger.SmallSetFunctions;
import gr.aueb.cs.nlp.postagger.WordWithCategory;

import weka.attributeSelection.InfoGainAttributeEval;
import weka.attributeSelection.Ranker;
import weka.classifiers.functions.SMO;
import weka.classifiers.meta.FilteredClassifier;
import weka.core.Instances;
import weka.core.converters.ArffSaver;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.Filter;
import weka.filters.MultiFilter;
import weka.filters.supervised.attribute.AttributeSelection;
import weka.filters.unsupervised.attribute.StringToWordVector;

//Class for the training of the system
public class addToTraining extends HttpServlet{
	protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
		response.setContentType("text/html; charset=UTF-8");
		String path = getServletConfig().getServletContext().getRealPath("");
		try{
		    HttpSession session = request.getSession(true);
			response.setCharacterEncoding("UTF-8");
			
			//Get the parameters
			String labels[] = request.getParameterValues("label");
			String stems[] = request.getParameterValues("stem");
			String pos[] = request.getParameterValues("pos");
			String polarities[] = request.getParameterValues("polarity");
			if(stems != null){
				for(int i=0; i<stems.length; i++){
					//Check if the user has marked any feature's stem to have the wrong polarity
					if(polarities[i]!=""){
						stems[i] = new String(stems[i].getBytes("ISO-8859-1"), "UTF-8");	//get the stem
						pos[i] = new String(pos[i].getBytes("ISO-8859-1"), "UTF-8");		//get the part of speech
						polarities[i] = new String(polarities[i].getBytes("ISO-8859-1"), "UTF-8");	//get the polarity
						insertStemToDB(stems[i], pos[i], polarities[i]);	//insert stem, part of speech and polarity to database
					}
				}
			}
			
			String words[] = request.getParameterValues("word");
			trainWithWords(labels, words, path);	//user has provided both representatives words and correct labels
			
			//Build classifier
			buildClassifier(path);
			
			//Forward to next page
			request.getRequestDispatcher("training.jsp").forward(request, response);
			
		}
		catch(Exception e){
			System.out.println(e);
		}
		 
	}
	
	//Training with both words and labels
	public void trainWithWords(String[] labels, String[] words, String path){
		int i, j;
		//Connect to database
		ConnectionToDB c = new ConnectionToDB();
		c.connect();
		Preprocessing pre = new Preprocessing(path, c);
		try{
			//File of the training set
			FileWriter fstream = new FileWriter(path + "/training.arff", true);
			BufferedWriter out = new BufferedWriter(fstream);
			//File that keeps the original tweets
			FileWriter fstream2 = new FileWriter(path + "/trainingOrig.txt", true);
			BufferedWriter out2 = new BufferedWriter(fstream2);
			//For every correct label that user has provided
			for(i = 0; i<labels.length; i++){
				if((words[i]!="") && (labels[i]!="")){
					words[i] = new String(words[i].getBytes("ISO-8859-1"), "UTF-8");
					labels[i] = new String(labels[i].getBytes("ISO-8859-1"), "UTF-8");
					
					String termURL = "http://search.twitter.com/search.json?q="+words[i]+"&lang=el&rpp=8";	//search for tweets that contain the representative word
					URL url = new URL(termURL);
					URI uri = new URI(url.getProtocol(), url.getUserInfo(), url.getHost(), url.getPort(), url.getPath(), url.getQuery(), url.getRef());
					url = new URL(uri.toASCIIString());
					HttpURLConnection connection = (HttpURLConnection) url.openConnection();
					connection.setRequestMethod("GET");
					    
					InputStream json = connection.getInputStream();
					Gson gson = new Gson();
					Reader reader = new InputStreamReader(json);         
					SearchResponse responseJson = gson.fromJson(reader, SearchResponse.class);
					List<Result> results = responseJson.results;	//returned tweets
					    
					    
					for(j=0; j<results.size(); j++ ){
						String text = results.get(j).text;
						PreprocessingResult text_stemmed = new PreprocessingResult();
						text_stemmed = pre.stemWords(text);		//stemmed tweet
						out2.write(text + "\n");			//add tweet to the file of the original tweets
						out.write(labels[i] + ",\"" + text_stemmed.getSentence() + "\"" + "\n");	//add tweet to the training set
					}
				}
			}
			out.close();
			out2.close();
		    fstream.close();
		    fstream2.close();
		    //Close connection to database
		    ConnectionToDB.close(c.getConnection());
		}catch(Exception e){
			
		}
	}

	
	//Insert stem, part of speech and correct polarity to database
	public void insertStemToDB(String stem, String pos, String polarity){
		ConnectionToDB c = new ConnectionToDB();
		c.connect();
		c.insertToDB(stem, pos, polarity);
		ConnectionToDB.close(c.getConnection());
	}
	
	//Build classifier
	public void buildClassifier(String path){
		try{
			DataSource source1 = new DataSource(path + "/training.arff");	//get the training set
			Instances train = source1.getDataSet();
			train.setClassIndex(0);
			Instances train_sel = null;
			StringToWordVector filter = new StringToWordVector();	//StringToWordVector filter
		 	String[] options2 = new String[5];
		 	options2[0] = "-W";
		 	options2[1] = "1000";
		 	options2[2] = "-S";
		 	options2[3] = "-stopwords";
		 	options2[4] = path + "/stopwords.txt";
		 	filter.setOptions(options2);
		 	
		 	//Attribute selection
		 	AttributeSelection attsel = new AttributeSelection(); 	                     
			InfoGainAttributeEval ev = new InfoGainAttributeEval();
			Ranker search = new Ranker();
			String[] options = new String[4];
			options[0] = "-N";
			options[1] = "-1";
			options[2] = "-T";
			options[3] = "0";
			search.setOptions(options);
			attsel.setEvaluator(ev);
			attsel.setSearch(search);
			
			Filter[] filters = new Filter[2];
			filters[0] = filter;
			filters[1] = attsel;
			MultiFilter multi = new MultiFilter();
			multi.setFilters(filters);
			multi.setInputFormat(train);
			train_sel = Filter.useFilter(train, multi);
			train_sel.setClassIndex(0);
			ArffSaver saver = new ArffSaver();
			saver.setInstances(train_sel);
			saver.setFile(new File(path + "/training_feat.arff"));		//keep the .arff file after attribute selection
			saver.writeBatch();
			FilteredClassifier cModel = (FilteredClassifier)new FilteredClassifier();
			SMO smo = new SMO();
			cModel.setFilter(multi);
			cModel.setClassifier(smo);
			cModel.buildClassifier(train);
			
			weka.core.SerializationHelper.write(path + "/classifier.model", cModel);	//save model
		}catch(Exception e){
			System.out.println(e);
		}
	}
	
}
