
/*
*Pool Public Licence version 0.1 or later
*Released on 20th April 2008.
*Any violations can be reported at paritosh@wikiocean.net
*What is treated as violations can be found at www.wikiocean.net/ppl1-voilations* 
******************************************************************************
* The contents of this file are subject Affero GPL v3 and later with additional conditions.
* The additional conditions are to be considered as part of the Affero GPL v3 and later.
* The additional conditions will supersede the Affero GPLv3 and later in case of conflict.
* ("License"); You may not use this file except in compliance with the License
* You may obtain a copy of the License at http://www.gnu.org/licenses/agpl.html
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
* the specific language governing rights and limitations under the License.
* The Initial Developer of the Original Code is Paritosh Pungaliya.
* created by Paritosh Pungaliya are Copyright (C) Paritosh Pungaliya; All Rights Reserved.
* Contributions are Copyright (C) 2001-2008 Paritosh Pungaliya.

* Objective of the additional terms (license) 
* 1) Is to extend the software freedom to freedom to work.
* 2) To ensure that knowledge is free from monopoly of agencie(s).
* 3) To avoid a situation where big corporates or investors can buy out free software groups and companies and then start milking the communities built around it.
* (this trend can be seen how the openness changes in companies that get Venture Capital funding.)
* (if you agree to this logic I invite you to our group to work together)

* Additional conditions
*  0) Source code will also include
* 	a) data dictionaries, data ontologies which are used to alter the behavior of code or to control           the features of application.
* 	b) Any code generated or derived automatically or manually from this code or logic
*  1) The revenue generated by deploying, implementing , selling services and other activities
*  	 based on this source code should be shared 100%, between the people working on it.
*  2) Capital invested should be given only fixed rate of return or linked to revenue/surplus growth for limited time.
*  3) People working on the project should always have an option to replace the capital provider in full or part with lower cost capital.
*  4) There should not be any ownership which can be sold as a claim on future profit to be generated from the work of people working on the code.
*  5) Brand thus generated should belong to the group.
*  6) For old and new members revenue sharing should be equal for same work equivalent.
*  7) The share of revenue should be decided by bidding for work before allocation of work within the group.
*  8) Before bidding various methods of measuring the work should be clearly defined and published in public domain. Any change in process should also be published in public domain immediately.
*  9) All data related to revenue sharing should be published as and when generated in public domain.
*  10) For group of people having size more than 10 people will have to make it open online for others to participate in bids. 
*  11) All people involved in related and sub project will be treated as a group.
*  12) Once this license is being used for a certain code then all and any other software services being provided by the group should compulsorily come under 100% revenue sharing.
*  13) Cross subsidization should not happen under this revenue share, no additional benefit should be drawn from the people, by the employing agency.
*  14) Any position or knowledge or legal structure created relating to people working on code, which adversely affects the spirit of 100% revenue share will be treated as a violation.
*  15) Revenue sharing defined.
*     a) includes direct revenue and also intangible gains like brand, and organization.
*     b) No personal judgment should be involved in judging the distribution of revenue.It has to be pre-defined formula based.
*     c)100% of revenue should be shared before expenses and receivers should have control over what expenses to incur.
*     d)For the work done in any month by contributing group member, working on any project can be given revenue share from the work done by the group, for a maximum of 18 months after that month.
*     e)Revenue should be apportioned to the month in which work is done and not when received.
*  The group fulfilling the above conditions also has to be a wiki ecosystem further defined in www.wekosystem.org or read about Wekosystem at www.wikiocean.net

*  ******************************************************************************
*/  
package pool.basicReport;

import java.io.*;
import java.sql.*;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Vector;

import timer.ExecutionTimer;
import timer.TimerRecordFile;
import utility.ConvertToLower;
import utility.Input;
import dbmanager.DBManager;
import dbmanager.GetIdName;
import file.*;
import file.FileWriter;

/**
 * Report writer for the "transaction" message type.
 *
 * For each property record it maintains one separator-delimited report file per
 * "from_master" field of the message type: the directory is the field's id
 * (resolved via {@code GetIdName}) under {@code Input.TRASACTIONREPORTSPATH},
 * and the file is named after the record's value for that field. A header row
 * listing all "add" fields is written when a file is first created. Open file
 * handles are cached in the inherited {@code FileListHash} map under the key
 * {@code "<value>_<fieldname>"} and released by {@link #closeAll()}.
 *
 * NOTE(review): {@code sqlDB}, {@code id}, {@code process}, {@code path} and
 * {@code FileListHash} are inherited from {@code BasicReports}, which is not
 * visible in this file — statements below involving them should be confirmed
 * against the superclass.
 */
public class Transaction extends BasicReports
{
	// NOTE(review): none of these three ResultSets are referenced anywhere in
	// this file — confirm they are unused before removing.
	ResultSet rsDir,rsFile,rsFileWrite;
	String transplTableName = "trans_property_details";
	// msgtype value used in every "command" table query below.
	String tableName = "transaction";
	// NOTE(review): this field is shadowed by a local of the same name in
	// insertReport(); the field itself is never read here.
	Hashtable<Object,Object> TransactionMap;
	File transFile;
	FileWriter fWriter;
	StringBuffer CsvData;
	GetIdName ginTrans,ginMaster;
	// Records with their pre-change (old) values; also replaced with the
	// merged old+new records by updateReport() before re-insertion.
	Vector<Object> propertyList;
	// Records carrying the post-change values; consumed by updateReport().
	Vector<Object> newPropertyList;
	/** No-arg constructor; {@link #initialise} must be called before use. */
	public Transaction() 
	{	
	}
	/** Shares an existing database connection with the superclass. */
	public Transaction(DBManager sqlDB)
	{
		super(sqlDB);
		this.sqlDB = sqlDB; 
	}
	
	/**
	 * Prepares a report run: opens a database connection, copies both
	 * property lists into local vectors, and pre-opens ("locks") the report
	 * files for the old records via {@link #lockFiles}.
	 *
	 * @param oldPropertyList records with their pre-change values
	 *                        (each element a Hashtable of field name to value)
	 * @param PropertyListNew records with their post-change values; may be
	 *                        null — the resulting NPE is caught and logged,
	 *                        leaving newPropertyList empty
	 * @param process         operation name stored on the superclass
	 */
	public void initialise(Vector<Object> oldPropertyList, Vector<Object> PropertyListNew, String process) 
	{
		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		// NOTE(review): a fresh connection is opened even when the
		// Transaction(DBManager) constructor already supplied one — confirm
		// this is intentional and that the old connection is not leaked.
		sqlDB = new DBManager();
		sqlDB.getConnect();
		this.process = process;
		this.path = Input.TRASACTIONREPORTSPATH;
		
		propertyList = new Vector<Object>();
		newPropertyList = new Vector<Object>();
		
		propertyList.addAll(oldPropertyList);
		try 
		{
			// addAll throws NullPointerException when PropertyListNew is
			// null; it is logged and newPropertyList simply stays empty.
			this.newPropertyList.addAll(PropertyListNew);
		} 
		catch (RuntimeException e) 
		{
			e.printStackTrace();
		}
		
		lockFiles(propertyList);	
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Transaction","initialise",t.duration());
	
	}

	/**
	 * Opens (and caches in FileListHash) one report file per "from_master"
	 * field for every record in the given list. For each field: the
	 * directory is the field's id, the file is named after the record's
	 * value for that field, and an empty/new file gets a header row listing
	 * all "add" fields of the message type.
	 *
	 * Files are opened through the project FileWriter in "rws" mode —
	 * NOTE(review): presumably that synchronous-write mode is what "lock"
	 * refers to here; confirm against the FileWriter implementation.
	 *
	 * @param propertyList records (Hashtables) whose files should be opened
	 */
	@SuppressWarnings("unchecked")
	private void lockFiles(Vector<Object> propertyList)
	{
		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		FileWriter fwObject = null;
		ginMaster = new GetIdName(sqlDB);
		/**It stores the Hashtable of the propertyList vector.*/
		Hashtable<Object,Object> propertyMap;
		// One row per master-derived field; the cursor is rewound with
		// beforeFirst() and reused for every record, so it must be scrollable.
		// NOTE(review): this ResultSet is never closed — confirm whether
		// DBManager owns and closes its statements, else this leaks.
		ResultSet fieldRS = sqlDB.getSelect("select fieldname from command where msgtype = '"+tableName+"' and from_master='yes' and process = 'add' order by order1");
		
		for(int i = 0; i < propertyList.size();i++ )
		{
			propertyMap = new Hashtable<Object, Object>();
			propertyMap.putAll((Hashtable<Object, Object>)propertyList.get(i));
			// Field names are matched case-insensitively by lower-casing keys.
			propertyMap = ConvertToLower.convertHashKey(propertyMap);
			try 
			{
				while (fieldRS.next())
				{	
					String colName = fieldRS.getString("fieldname").toLowerCase();
					// Directory name is the field's id, not its name.
					String colNameDir = ginMaster.getId(colName);
					String path = Input.TRASACTIONREPORTSPATH + colNameDir;
					
					transFile = new File(path);
					
					if(transFile.exists()== false)
						transFile.mkdirs();
					
					try
					{	
						// Value of this field for the current record; a record
						// lacking the field throws NPE and is skipped below.
						id = propertyMap.get(colName).toString();
						String key = id+"_"+colName;
						// Only open each value/field file once per run.
						if(FileListHash.containsKey(key)==false)
						{
							String path1 = path+"//"+id+Input.FILRFORMAT;
								
							fwObject = new FileWriter(path1,"rws");
							RandomAccessFile fw = fwObject.fileOpen();
							// A zero-length file is brand new: write the
							// header row of all "add" field names first.
							if(fw.length()==0)
							{	
								ResultSet rs = sqlDB.getSelect("select fieldname from command where msgtype = '"+tableName+"' and process = 'add' order by order1");
								while (rs.next())
								{
									try 
									{
										String colHeading = rs.getString("fieldname").toLowerCase();
										fw.writeBytes(colHeading + Input.SEPARATOR);
									} 
									catch (Exception e1){ }
								}
								rs.close();
								fw.writeBytes(Input.ENDSEPARATOR);
							}
							FileListHash.put(key, fwObject);
						}
					} 
					// Best-effort: a record without this field, or a file that
					// cannot be opened, is silently skipped.
					// NOTE(review): swallowing Exception also hides real I/O
					// failures — consider at least logging here.
					catch (Exception e) { }
				}
				// Rewind so the next record re-reads the same field list.
				fieldRS.beforeFirst();
			}
			catch (Exception e) 
			{
				e.printStackTrace();
			}
		}
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Transaction","lockFiles",t.duration());
	
	}
	
	
	/**
	 * Appends every record in propertyList to each of its per-field report
	 * files. The file handles must already be cached in FileListHash (see
	 * {@link #lockFiles}); all handles are closed on completion.
	 *
	 * @return an empty StringBuffer when a record carries a placeholder id
	 *         ("0" or "no$") — remaining records are then NOT written —
	 *         or null after normal completion.
	 *         NOTE(review): the null-vs-empty distinction looks accidental;
	 *         confirm how callers use the return value.
	 */
	@SuppressWarnings("unchecked")
	@Override
	synchronized public StringBuffer insertReport() 
	{
		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		// Local shadows the inherited 'id' field used elsewhere in this class.
		String id;
		ResultSet fieldRS;
		// Local shadows the (unused) TransactionMap field above.
		Hashtable<Object,Object> TransactionMap;
		
		for(int i = 0; i < propertyList.size(); i++)
		{
			TransactionMap = new Hashtable<Object, Object>();
			TransactionMap.putAll((Hashtable<Object, Object>)propertyList.get(i));
			TransactionMap = ConvertToLower.convertHashKey(TransactionMap);
			id = TransactionMap.get("id").toString();
			//String path = Input.TRASACTIONREPORTSPATH + id;
			
			// Placeholder ids abort the whole run (handles stay open).
			if(id.equals("0") || id.equals("no$"))
			{
				return new StringBuffer();
			}
					
			try 
			{
				// NOTE(review): re-queried for every record and never closed
				// (the beforeFirst() below is redundant with the re-query) —
				// confirm DBManager statement ownership; likely a leak.
				fieldRS = sqlDB.getSelect("select fieldname from command where msgtype = '"+tableName+"' and process = 'add' and from_master = 'yes' order by order1");
				
				while (fieldRS.next())
				{
					try 
					{
						String colName = fieldRS.getString("fieldname").toLowerCase();
						// 'id' is reused as the record's value for this field.
						id = TransactionMap.get(colName).toString();
						
						String key = id+"_"+colName;
						FileWriter fw = (FileWriter) FileListHash.get(key);
						
						// Appends the record's values to the cached file.
						FileOperation fo = new FileOperation(fw.file);
						fo.addRecordToFile(TransactionMap);
					} 
					// Best-effort: missing field value or missing cached
					// handle (NPE) skips this field only.
					catch (Exception e) 
					{
						//e.printStackTrace();
					}
				}
				fieldRS.beforeFirst();
			}
			catch (Exception e) { }
		}
		
		closeAll();
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Transaction","insertReport",t.duration());
	
		return null;
	}
	
	/**
	 * Closes every cached file handle in FileListHash. Failures are logged
	 * and iteration continues so one bad handle cannot keep others open.
	 * NOTE(review): entries are not removed from the map after closing.
	 */
	private void closeAll()
	{
		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		Iterator<Object> enumrator = FileListHash.values().iterator();
		while(enumrator.hasNext())
		{
			try
			{
				FileWriter fw = (FileWriter)enumrator.next();
				fw.fileClose();
			}
			catch (Exception e) 
			{
				e.printStackTrace();
			}
		}
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Transaction","closeAll",t.duration());
	
	}
	
	/**
	 * Updates records in the report files: deletes each old record from its
	 * per-field files, then — if at least one delete succeeded — merges old
	 * and new values (new values win), replaces propertyList with the merged
	 * records, and re-inserts them via {@link #insertReport()}.
	 *
	 * Assumes propertyList and newPropertyList are aligned index-by-index
	 * for the same logical record — NOTE(review): confirm the caller
	 * guarantees this; a size mismatch would throw inside the loop.
	 *
	 * @return a buffer of concatenated per-field status messages
	 *         ("Record Deleted Successfully " / "Unable To Find Record ")
	 */
	@SuppressWarnings("unchecked")
	@Override
	public StringBuffer updateReport() 
	{
		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		lockFiles(newPropertyList); // It will locks the file for the updation operation.
		
		//bfile.lockFiles(newPropertyList);	
		
		Hashtable<Object, Object> oldPropertyMap;
		Hashtable<Object, Object> newPropertyMap;
		Hashtable<Object, Object> tempPropertyMap;
		Vector<Object> propertyVec = new Vector<Object>();
		// Set to 1 once any delete succeeds; gates the re-insert below.
		int flagDelete = 0 ;
		StringBuffer sbrDTB = new StringBuffer();
		/*ResultSet rs = null;
		
		rs = sqlDB.getSelect("select fieldname from command where msgtype = '"+plTableName+"' and process='add' and from_master='yes' order by order1");
		*/

		// NOTE(review): never closed, same concern as in lockFiles().
		ResultSet fieldRS = sqlDB.getSelect("select fieldname from command where msgtype = '"+tableName+"' and from_master='yes' and process = 'add' order by order1");
		
		for(int i = 0; i < newPropertyList.size();i++ )
		{
			newPropertyMap = new Hashtable<Object, Object>();
			oldPropertyMap = new Hashtable<Object, Object>();
			tempPropertyMap = new Hashtable<Object, Object>();
			
			newPropertyMap.putAll((Hashtable<Object, Object>)newPropertyList.get(i));
			newPropertyMap = ConvertToLower.convertHashKey(newPropertyMap);
			
			oldPropertyMap.putAll((Hashtable<Object, Object>)propertyList.get(i));
			oldPropertyMap = ConvertToLower.convertHashKey(oldPropertyMap);
		
			// Merge: start from the old record, overwrite with new values.
			tempPropertyMap.putAll(oldPropertyMap);
			tempPropertyMap.putAll(newPropertyMap);
			propertyVec.add(tempPropertyMap);
			
			try 
			{
				while (fieldRS.next())
				{
					String colName = fieldRS.getString("fieldname").toLowerCase();
					try 
					{
						// Old value locates the file the record was written to.
						id = oldPropertyMap.get(colName).toString();
						String key = id+"_"+colName;
					
					
						FileWriter fw = (FileWriter) FileListHash.get(key);
						
						
						FileOperation fo = new FileOperation(fw.file);
						if(fo.deleteFileRecord(oldPropertyMap))
						{
							sbrDTB.append("Record Deleted Successfully ");
							flagDelete = 1;
						}
						else
						{
							System.out.println("basicReports::TransactionBasicReport::updateReport(): Unable To Find Record ");
							sbrDTB.append("Unable To Find Record ");
							//return sbrDTB;
						}
					} 
					catch (Exception e) 
					{
						e.printStackTrace();
					}
					//fo.addRecordToFile(propertyMap);
				}
				// Rewind for the next record (cursor must be scrollable).
				fieldRS.beforeFirst();
			}
			catch (Exception e) { }
		}
		
		if (flagDelete == 1)
		{
			// Re-insert the merged records; insertReport() reads propertyList.
			propertyList = propertyVec;
			//Property_details pdbr = new Property_details(propertyVec,sqlDB, process);
			insertReport();
		}
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Transaction","updateReport",t.duration());
	
		return sbrDTB;
	}
	/** Not implemented for this report type; always returns null. */
	@Override
	public StringBuffer deleteReport() {
		// TODO Auto-generated method stub
		return null;
	}
}
