
/*
*Pool Public Licence version 0.1 or later
*Released on 20th April 2008.
*Any violations can be reported at paritosh@wikiocean.net
*What is treated as a violation can be found at www.wikiocean.net/ppl1-voilations* 
******************************************************************************
* The contents of this file are subject Affero GPL v3 and later with additional conditions.
* The additional conditions are to be considered as part of the Affero GPL v3 and later.
* The additional conditions will supersede the Affero GPLv3 and later in case of conflict.
* ("License"); You may not use this file except in compliance with the License
* You may obtain a copy of the License at http://www.gnu.org/licenses/agpl.html
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
* the specific language governing rights and limitations under the License.
* The Initial Developer of the Original Code is Paritosh Pungaliya.
* created by Paritosh Pungaliya are Copyright (C) Paritosh Pungaliya; All Rights Reserved.
* Contributions are Copyright (C) 2001-2008 Paritosh Pungaliya.

* Objective of the additional terms (license) 
* 1) Is to extend the software freedom to freedom to work.
* 2) To ensure that knowledge is free from monopoly of agencie(s).
* 3) To avoid a situation where big corporates or investors can buy out free software groups and companies and then start milking the communities built around it.
* (this trend can be seen how the openness changes in companies that get Venture Capital funding.)
* (if you agree to this logic I invite you to our group to work together)

* Additional conditions
*  0) Source code will also include
* 	a) data dictionaries, data ontologies which are used to alter the behavior of code or to control           the features of application.
* 	b) Any code generated or derived automatically or manually from this code or logic
*  1) The revenue generated by deploying, implementing , selling services and other activities
*  	 based on this source code should be shared 100%, between the people working on it.
*  2) Capital invested should be given only fixed rate of return or linked to revenue/surplus growth for limited time.
*  3) People working on the project should always have an option to replace the capital provider in full or part with lower cost capital.
*  4) There should not be any ownership which can be sold as a claim on future profit to be generated from the work of people working on the code.
*  5) Brand thus generated should belong to the group.
*  6) For old and new members revenue sharing should be equal for same work equivalent.
*  7) The share of revenue should be decided by bidding for work before allocation of work within the group.
*  8) Before bidding, various methods of measuring the work should be clearly defined and published in the public domain. Any change in process should also be published in the public domain immediately.
*  9) All data related to to revenue sharing should be published as and when generated in public domain.
*  10) For group of people having size more than 10 people will have to make it open online for others to participate in bids. 
*  11) All people involved in related and sub project will be treated as a group.
*  12) Once this license is being used for a certain code then all and any other software services being provided by the group should compulsorily come under 100% revenue sharing.
*  13) Cross subsidization should not happen under this revenue share, no additional benefit should be drawn from the people, by the employing agency.
*  14) Any position or knowledge or legal structure created relating to people working on code, which adversely affects the spirit of 100% revenue share will be treated as a violation.
*  15) Revenue sharing defined.
*     a) includes direct revenue and also intangible gains like brand, and organization.
*     b) No personal judgment should be involved in judging the distribution of revenue.It has to be pre-defined formula based.
*     c)100% of revenue should be shared before expenses and receivers should have control over what expenses to incur.
*     d)For the work done in any month by contributing group member, working on any project can be given revenue share from the work done by the group, for a maximum of 18 months after that month.
*     e)Revenue should be apportioned to the month in which work is done and not when received.
*  The group fulfilling the above conditions also has to be a wiki ecosystem further defined in www.wekosystem.org or read about Wekosystem at www.wikiocean.net

*  ******************************************************************************
*/  
package pool.basicReport;

import java.io.*;
import java.sql.ResultSet;
import java.util.*;

import dbmanager.DBManager;
import timer.ExecutionTimer;
import timer.TimerRecordFile;
import utility.ConvertToLower;
import utility.Input;
import file.FileOperation;
import file.FileWriter;

/**
 * Generates the basic reports for property_details table entries.
 * Extends the BasicReports class.
 *
 * @author Divyesh
 * */
public class Property_details extends BasicReports 
{
	/** Name of the database table this report generator serves. */
	private String plTableName = "property_details"; 
	
	/** "Old" property maps (one Hashtable per property) to report on; populated by initialise(). */
	Vector<Object> propertyList;
	/** Replacement property maps used by updateReport(); left empty when none were supplied. */
	Vector<Object> newPropertyList;
	 
	//BlockFiles bfile; 
	/** File handle. NOTE(review): never assigned or read in this class — looks dead; confirm before removing. */
	File fileHandler; 
	/***//*
	Hashtable<Object, Object> FileListHash = new Hashtable<Object, Object>(); */
	/** Reportable field names for this table (from the command table); rewound and reused by every report method. */
	ResultSet fieldRS = null;
	
	/**
	 * Creates a report generator that opens its own database connection.
	 */
	public Property_details()
	{
	  sqlDB = new DBManager();
	  sqlDB.getConnect();
	}

	/**
	 * Creates a report generator that reuses an existing database connection.
	 *
	 * @param sqlDB connected database manager shared with the caller
	 */
	public Property_details(DBManager sqlDB)
	{
		super(sqlDB);
		this.sqlDB = sqlDB; 
	}
	
	/**
	 * Prepares one report run: records the process name and the master report
	 * path, takes defensive copies of both property lists, and pre-opens
	 * (locks) the report files for every property in {@code oldPropertyList}.
	 *
	 * @param oldPropertyList property maps in their current state
	 * @param newPropertyList replacement maps for updates (may be null for add/delete runs)
	 * @param process         operation being reported, e.g. "add" or "mod"
	 */
	public void initialise(Vector<Object> oldPropertyList,Vector<Object> newPropertyList,String process)
	{ 

		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		this.process = process;
		this.path = Input.MASTERREPORTSPATH;
		propertyList = new Vector<Object>();
		propertyList.addAll(oldPropertyList);
		try 
		{
			this.newPropertyList = new Vector<Object>();
			this.newPropertyList.addAll(newPropertyList);
		} catch (Exception e) {
			// newPropertyList may legitimately be null (add/delete); the copy stays empty.
		}
		
		lockFiles(propertyList);
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Property_details","initialise",t.duration());
	
		/*bfile = new BlockFiles(sqlDB, process, tableName);
		bfile.lockFiles(propertyList);*/		
	}
	
	/**
	 * Opens, and caches in FileListHash, one report file per id/field pair for
	 * every property in the given list. When a file cannot be opened
	 * (FileNotFoundException), the property is first written through
	 * Master_table and the open is retried.
	 *
	 * @param propertyList property maps whose report files should be opened
	 */
	@SuppressWarnings("unchecked")
	private void lockFiles(Vector<Object> propertyList)
	{

		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		FileWriter fw = null;
		// Working copy of the current property's map, with lower-cased keys.
		Hashtable<Object,Object> propertyMap;
		fieldRS = sqlDB.getSelect("select fieldname from command where msgtype = '"+plTableName+"' and process='add' and from_master='yes' order by order1");
		
		for(int i = 0; i < propertyList.size();i++ )
		{
			propertyMap = new Hashtable<Object, Object>();
			propertyMap.putAll((Hashtable<Object, Object>)propertyList.get(i));
			propertyMap = ConvertToLower.convertHashKey(propertyMap);
			try 
			{
				while (fieldRS.next())
				{
					try
					{
						String colName = fieldRS.getString("fieldname").toLowerCase();
						id = propertyMap.get(colName).toString();
						String key = id+"_"+colName;
						// "0" and "no$" are sentinel ids with no report file of their own.
						if(id.equals("0") || id.equals("no$"))
							continue;
						if(FileListHash.containsKey(key)==false)
						{
							String parentPath = path+id;
							File parent = new File(parentPath);
							parent.mkdirs();
							String path1 = parentPath + "//"+id+"_"+colName+Input.FILRFORMAT;
							try 
							{
								fw = new FileWriter(path1,"rws");
								fw.fileOpen();
							}
							catch (FileNotFoundException e) 
							{
								// No master record/file yet: create it via Master_table, then retry the open.
								Vector<Object> temp = new Vector<Object>();
								propertyMap.put("id", id);
								temp.add(propertyMap);
								Master_table master = new Master_table(temp, tableName, sqlDB,process);
								master.insertReport();
								fw = new FileWriter(path1,"rws");
								fw.fileOpen();
							}
							FileListHash.put(key, fw);
						}
					} 
					catch (Exception e) {
						// Best-effort: a property missing this field is skipped silently.
					}
				}
				fieldRS.beforeFirst(); // rewind so the next property re-reads the field list
			}
			catch (Exception e) {
				e.printStackTrace();
			}			
		}
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Property_details","lockFiles",t.duration());
	
	}
	
	/**
	 * Appends one record per property to every locked report file (MID, PID,
	 * PV, VT, Ref., FD, TD — all columns of the property_details table). An
	 * empty file first receives a header row of the column names configured
	 * for the current process. Closes all locked files when done.
	 *
	 * @return an empty StringBuffer (no status text is accumulated here)
	 */
	@SuppressWarnings("unchecked")
	synchronized public StringBuffer insertReport()
	{		

		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		// Working copy of the current property's map, with lower-cased keys.
		Hashtable<Object,Object> propertyMap;
		// Column names that make up the header row for this process.
		ResultSet fieldRS1 = sqlDB.getSelect("select fieldname from command where msgtype = '"+plTableName+"' and process = '"+process+"'  order by order1");
		fieldRS = sqlDB.getSelect("select fieldname from command where msgtype = '"+plTableName+"' and process='add' and from_master='yes' order by order1");
		for(int i = 0; i < propertyList.size();i++ )
		{
			propertyMap = new Hashtable<Object, Object>();
			propertyMap.putAll((Hashtable<Object, Object>)propertyList.get(i));
			propertyMap = ConvertToLower.convertHashKey(propertyMap);
			try 
			{
				while (fieldRS.next())
				{
					try 
					{
						String colName = fieldRS.getString("fieldname").toLowerCase();
						id = propertyMap.get(colName).toString();
						String key = id+"_"+colName;
						
						FileWriter fw = (FileWriter) FileListHash.get(key);
						RandomAccessFile raf = fw.file;
						
						// A brand-new (empty) file gets the header row before any data.
						if(raf.length()==0)
						{
							try 
							{
								while(fieldRS1.next())
								{
									String colName1 = fieldRS1.getString("fieldname");
									// Log tag fixed: previously claimed customReport::Condition::generateReport().
									System.out.println("basicReports::Property_details::insertReport() --> : Column Names are : "+colName1);
									raf.writeBytes(colName1+Input.SEPARATOR);
								}						
								raf.writeBytes(Input.ENDSEPARATOR);
								fieldRS1.beforeFirst();
							}
							catch (Exception e) 
							{
								// Header write failed; the data record is still appended below.
							}
						}
						
						FileOperation fo = new FileOperation(raf);
						fo.addRecordToFile(propertyMap);
					} 
					catch (Exception e) 
					{
						// Best-effort: property lacks this field or its file was never locked — skip it.
					}
				}
				fieldRS.beforeFirst(); // rewind for the next property
			}
			catch (Exception e) {
				// Best-effort: failure on one property does not abort the run.
			}
		}
		closeAll();
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Property_details","insertReport",t.duration());
	
		return new StringBuffer();
	}
	
	/**
	 * Closes every file writer cached in FileListHash.
	 * NOTE(review): closed writers remain in the hash — confirm callers
	 * re-lock files before reusing this instance.
	 */
	void closeAll()
	{

		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		Iterator<Object> enumrator = FileListHash.values().iterator();
		while(enumrator.hasNext())
		{
			try
			{
				FileWriter fw = (FileWriter)enumrator.next();
				fw.fileClose();
			}
			catch (Exception e) 
			{
				e.printStackTrace();
			}
		}
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Property_details","closeAll",t.duration());
	
	}

	
		
	/**
	 * Updates the report files for all columns of the property_details table:
	 * deletes each old record and, if at least one delete succeeded,
	 * re-inserts the merged old+new property maps via insertReport().
	 *
	 * @return status text accumulated per deleted field
	 */
	@SuppressWarnings("unchecked")
	public StringBuffer updateReport() 
	{

		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		lockFiles(newPropertyList); // It will locks the file for the updation operation.
		
		Hashtable<Object, Object> oldPropertyMap;
		Hashtable<Object, Object> newPropertyMap;
		Hashtable<Object, Object> tempPropertyMap;
		Vector<Object> propertyVec = new Vector<Object>();
		int flagDelete = 0 ;
		StringBuffer sbrDTB = new StringBuffer();
		/*ResultSet rs = null;
		
		rs = sqlDB.getSelect("select fieldname from command where msgtype = '"+plTableName+"' and process='add' and from_master='yes' order by order1");
		*/
		for(int i = 0; i < newPropertyList.size();i++ )
		{
			newPropertyMap = new Hashtable<Object, Object>();
			oldPropertyMap = new Hashtable<Object, Object>();
			tempPropertyMap = new Hashtable<Object, Object>();
			
			newPropertyMap.putAll((Hashtable<Object, Object>)newPropertyList.get(i));
			newPropertyMap = ConvertToLower.convertHashKey(newPropertyMap);
			
			oldPropertyMap.putAll((Hashtable<Object, Object>)propertyList.get(i));
			oldPropertyMap = ConvertToLower.convertHashKey(oldPropertyMap);
		
			// Merged map: old values overlaid with the new ones; re-inserted after the deletes.
			tempPropertyMap.putAll(oldPropertyMap);
			tempPropertyMap.putAll(newPropertyMap);
			propertyVec.add(tempPropertyMap);
				
			try 
			{
				while (fieldRS.next())
				{
					String colName = fieldRS.getString("fieldname").toLowerCase();
					id = oldPropertyMap.get(colName).toString();
					String key = id+"_"+colName;
					FileWriter fw = (FileWriter) FileListHash.get(key);
					
		/*			SearchRecord search = new SearchRecord(fw.file);
					int RecordIndex = search.getRecordIndexFile(oldPropertyMap);
					if(RecordIndex == 0)
					{
							System.out.println("basicReports::TransactionBasicReport::updateReport(): Unable To Find Record ");
							sbrDTB.append("Unable To Find Record ");
							//return sbrDTB;
					}
			*/		
					FileOperation fo = new FileOperation(fw.file);
					if(fo.deleteFileRecord(oldPropertyMap))
					{
						sbrDTB.append("Record Deleted Successfully ");
						flagDelete = 1;
					}
				}
				fieldRS.beforeFirst(); // rewind for the next property
			}
			catch (Exception e) {
				// Best-effort: failure on one property does not abort the run.
			}
		}
		
		if (flagDelete == 1)
		{
			propertyList = propertyVec;
			insertReport();
		}
		closeAll();
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Property_details","updateReport",t.duration());
	
		return sbrDTB;
	}

	/**
	 * Deletes each property's record from every locked report file
	 * (all columns of the property_details table).
	 *
	 * @return status text per field: "Record Deleted Successfully " or
	 *         "Unable To Find Record "
	 */
	@Override
	@SuppressWarnings("unchecked")
	public StringBuffer deleteReport()
	{	

		ExecutionTimer t = new ExecutionTimer();
		 t.start();
		Hashtable<Object, Object> oldPropertyMap;
	
		Vector<Object> propertyVec = new Vector<Object>();
		
		StringBuffer sbrDTB = new StringBuffer();
		
		for(int i = 0; i < propertyList.size();i++ )
		{
			oldPropertyMap = new Hashtable<Object, Object>();
			
			oldPropertyMap.putAll((Hashtable<Object, Object>)propertyList.get(i));
			oldPropertyMap = ConvertToLower.convertHashKey(oldPropertyMap);
		
			propertyVec.add(oldPropertyMap);
				
			try 
			{
				while (fieldRS.next())
				{
					try
					{
						String colName = fieldRS.getString("fieldname").toLowerCase();
						id = oldPropertyMap.get(colName).toString();
						String key = id+"_"+colName;
						FileWriter fw = (FileWriter) FileListHash.get(key);
						
						FileOperation fo = new FileOperation(fw.file);
						if(fo.deleteFileRecord(oldPropertyMap))
						{
							sbrDTB.append("Record Deleted Successfully ");
						}
						else
						{
							// Log tag fixed: previously claimed TransactionBasicReport::updateReport().
							System.out.println("basicReports::Property_details::deleteReport(): Unable To Find Record ");
							sbrDTB.append("Unable To Find Record ");
						}
					}
					catch (Exception e) {
						// Best-effort: property lacks this field or its file was never locked — skip it.
					}
				}
				fieldRS.beforeFirst(); // rewind for the next property
			}
			catch (Exception e) {
				// Best-effort: failure on one property does not abort the run.
			}			
			
		}		
		
		closeAll();
		t.end();
		TimerRecordFile timerFile=new TimerRecordFile("pool.basicReport","Property_details","deleteReport",t.duration());
	
		// Bug fix: previously returned new StringBuffer(), discarding all accumulated status text.
		return sbrDTB;	
	}
	
	
	


}
