/**
 * This file is part of Sonedyan.
 * 
 * Sonedyan is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation;
 * either version 3 of the License, or (at your option) any
 * later version.
 *
 * Sonedyan is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public
 * License along with Sonedyan; see the file COPYING.  If not
 * see <http://www.gnu.org/licenses/>.
 * 
 * Copyright (C) 2009-2013 Jimmy Dubuisson <jimmy.dubuisson@gmail.com>
 */

package org.unige.mpej.eckmann.sonedyan.twitter;

import java.io.BufferedWriter;
import java.io.Closeable;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.Version;
import org.unige.mpej.eckmann.sonedyan.db.DatabaseHandler;

import com.twitter.Extractor;

/**
 * Indexer of Stanford Twitter 2009 database
 * 
 * https://github.com/twitter/twitter-text-java
 */
public class DatabaseIndexer 
{
	private static final Logger log = Logger.getLogger(DatabaseIndexer.class);
	
	/** Filesystem directory that receives the Lucene index. */
	private static final String DIR_PATH = "/home/jimmy/lucene";
	
	/** Total number of rows in the tweet table, bounding the pagination loop. */
	private static final int NUM_ROWS = 476553560;
	
	/** Number of rows fetched per SELECT ... LIMIT batch. */
	private static final int FETCH_SIZE = 1000000;
	
	/**
	 * Pages through the tweet table in batches of {@link #FETCH_SIZE}, extracts
	 * hashtags, mentioned screen names, URLs and the reply screen name from each
	 * tweet text (via twitter-text's {@code Extractor}), and appends one INSERT
	 * statement per extracted entity to the corresponding SQL dump file
	 * (hash_dump.sql, mention_dump.sql, url_dump.sql, reply_dump.sql).
	 * A Lucene IndexWriter over {@link #DIR_PATH} is opened as well; see
	 * {@link #addDoc} for the per-tweet document layout (the indexing call is
	 * currently disabled, matching the previous behavior).
	 */
	public static void main(String[] args)
	{
		Directory index = null;
		IndexWriter w = null;
		BufferedWriter hashFs = null;
		BufferedWriter mentionFs = null;
		BufferedWriter urlFs = null;
		BufferedWriter replyFs = null;
		PreparedStatement tweetPs = null;
		
		try
		{
			index = new SimpleFSDirectory(new File(DIR_PATH));
			DatabaseHandler dbHandler = new DatabaseHandler();
			
			// Exact-match fields keep their value as a single token
			// (KeywordAnalyzer); list fields are pre-joined with spaces, so
			// whitespace splitting suffices; everything else (the tweet text)
			// falls back to the StandardAnalyzer.
			// http://www.johnandcailin.com/blog/cailin/tokenizing-twitter-posts-lucene
			// http://stackoverflow.com/questions/1841331/what-analyzer-should-i-use-for-a-url-in-lucene-net
			Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
			analyzerPerField.put("id", new KeywordAnalyzer());
			analyzerPerField.put("time", new KeywordAnalyzer());
			analyzerPerField.put("screenName", new KeywordAnalyzer());
			analyzerPerField.put("reply", new KeywordAnalyzer());
			analyzerPerField.put("hashTags", new WhitespaceAnalyzer(Version.LUCENE_41));
			analyzerPerField.put("mentions", new WhitespaceAnalyzer(Version.LUCENE_41));
			analyzerPerField.put("urls", new WhitespaceAnalyzer(Version.LUCENE_41));
			
			PerFieldAnalyzerWrapper aWrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(Version.LUCENE_41), analyzerPerField);
			IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_41, aWrapper);
			w = new IndexWriter(index, config);
			
			tweetPs = dbHandler.getConn().prepareStatement("SELECT id, time, screen_name, text FROM tweet LIMIT ?,?;");
			
			hashFs = new BufferedWriter(new FileWriter("hash_dump.sql"));
			mentionFs = new BufferedWriter(new FileWriter("mention_dump.sql"));
			urlFs = new BufferedWriter(new FileWriter("url_dump.sql"));
			replyFs = new BufferedWriter(new FileWriter("reply_dump.sql"));
			
			// one instance is enough: extraction keeps no per-tweet state,
			// so there is no need to allocate a new Extractor per row
			Extractor extractor = new Extractor();
			
			int limit = 0;
			while (limit < NUM_ROWS)
			{
				log.info("Current limit: " + limit + " (over " + NUM_ROWS + ")");
				
				tweetPs.setInt(1, limit);
				tweetPs.setInt(2, FETCH_SIZE);
				
				ResultSet rs = dbHandler.execute(tweetPs);
				try
				{
					while (rs.next())
					{
						int id = rs.getInt("id");
						String text = rs.getString("text");
						
						List<String> hashTags = extractor.extractHashtags(text);
						List<String> mentions = extractor.extractMentionedScreennames(text);
						List<String> urls = extractor.extractURLs(text);
						String reply = extractor.extractReplyScreenname(text);
						
						// generate SQL dumps for the entity link tables
						for (String hashTag : hashTags)
						{
							writeInsert(hashFs, "tweet_id_hashtag", id, hashTag);
						}
						for (String mention : mentions)
						{
							writeInsert(mentionFs, "tweet_id_mention", id, mention);
						}
						for (String url : urls)
						{
							writeInsert(urlFs, "tweet_id_url", id, url);
						}
						if (reply != null && reply.length() > 0)
						{
							writeInsert(replyFs, "tweet_id_reply", id, reply);
						}
					}
				}
				finally
				{
					// release the batch's cursor before fetching the next page
					rs.close();
				}
				
				limit += FETCH_SIZE;
			}
		}
		catch (Exception e)
		{
			// keep the full stack trace; getMessage() alone hides the failure site
			log.error("An error occurred: " + e.getMessage(), e);
		}
		finally
		{
			// close unconditionally so a failure mid-run does not leak handles
			// or leave partially-flushed dump files
			closeQuietly(hashFs);
			closeQuietly(mentionFs);
			closeQuietly(replyFs);
			closeQuietly(urlFs);
			closeQuietly(w);
			closeQuietly(index);
			if (tweetPs != null)
			{
				try
				{
					tweetPs.close();
				}
				catch (Exception e)
				{
					log.error("Could not close statement: " + e.getMessage(), e);
				}
			}
		}
	}
	
	/**
	 * Escapes backslashes and double quotes so the value can be embedded in a
	 * double-quoted MySQL string literal without corrupting the dump (URLs in
	 * particular may contain quotes).
	 */
	private static String escapeSqlValue(String value)
	{
		return value.replace("\\", "\\\\").replace("\"", "\\\"");
	}
	
	/**
	 * Appends one INSERT statement for (tweetId, value) to the given dump file.
	 */
	private static void writeInsert(BufferedWriter out, String table, int tweetId, String value) throws IOException
	{
		out.write("INSERT INTO " + table + " VALUES (" + tweetId + ", \"" + escapeSqlValue(value) + "\");");
		out.newLine();
	}
	
	/**
	 * Closes a resource if non-null, logging (rather than propagating) any
	 * failure so cleanup of the remaining resources still runs.
	 */
	private static void closeQuietly(Closeable resource)
	{
		if (resource != null)
		{
			try
			{
				resource.close();
			}
			catch (IOException e)
			{
				log.error("Could not close resource: " + e.getMessage(), e);
			}
		}
	}
	
	/**
	 * Builds a Lucene document for one tweet and adds it to the index.
	 * Empty or null optional fields (reply, hashTags, urls, mentions, text)
	 * are simply omitted from the document; the tweet text is indexed but not
	 * stored. Currently not invoked from main (indexing is disabled).
	 *
	 * @param w          open index writer the document is added to
	 * @param id         tweet primary key
	 * @param date       tweet timestamp, stored as epoch millis
	 * @param screenName author screen name
	 * @param reply      screen name the tweet replies to, may be null/empty
	 * @throws IOException if the index writer fails
	 */
	private static void addDoc(IndexWriter w, int id, Date date, String screenName, List<String> hashTags, List<String> mentions, List<String> urls, String reply, String text) throws IOException 
	{
		Document doc = new Document();
		
		doc.add(new IntField("id", id, Field.Store.YES));
		doc.add(new LongField("time", date.getTime(), Field.Store.YES));
		doc.add(new StringField("screenName", screenName, Field.Store.YES));
		
		if (reply != null && reply.length() > 0)
		{
			doc.add(new StringField("reply", reply, Field.Store.YES));
		}
		
		if (hashTags != null && hashTags.size() > 0)
		{
			// joined with spaces to match the WhitespaceAnalyzer on this field
			doc.add(new TextField("hashTags", StringUtils.join(hashTags, " "), Field.Store.YES));
		}
		
		if (urls != null && urls.size() > 0)
		{
			doc.add(new TextField("urls", StringUtils.join(urls, " "), Field.Store.YES));
		}
		
		if (mentions != null && mentions.size() > 0)
		{
			doc.add(new TextField("mentions", StringUtils.join(mentions, " "), Field.Store.YES));
		}
		
		if (text != null && text.length() > 0)
		{
			doc.add(new TextField("text", text, Field.Store.NO));
		}
		
		w.addDocument(doc);
	}

}
