//
// (C) Copyright 2009 Irantha Suwandarathna (irantha@gmail.com)
// All rights reserved.
//

/* Copyright (c) 2001-2008, The HSQL Development Group
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the HSQL Development Group nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG,
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


using System;
using System.IO;
using System.Collections.Generic;
using EffiProz.Core.Lib;
using EffiProz.Core.RowIO;
using EffiProz.Core.Errors;
using EffiProz.Core.Store;
using EffiProz.Core.Navigators;


namespace EffiProz.Core.Persist
{
    // oj@openoffice.org - changed to file access api

    /**
     *  Routine to defrag the *.data file.
     *
     *  This method iterates over the primary index of a table to find the
     *  disk position for each row and stores it, together with the new position
     *  in an array.
     *
     *  A second pass over the primary index writes each row to the new disk
     *  image after translating the old pointers to the new.
     *
     * @author     fredt@users
     * @version    1.8.0
     * @since      1.7.2
     */
    sealed public class DataFileDefrag
    {

        // Destination stream for the new *.data image (buffered except on Silverlight).
        Stream fileStreamOut;
        // Next free write position in the new image; after the data pass this is the
        // end-of-file value stored at DataFileCache.LONG_FREE_POS_POS.
        long fileOffset;
        // Elapsed-time source for the progress messages printed throughout the run.
        StopWatch stopw = new StopWatch();
        // Path of the existing .data file; the new image is written to dataFileName + ".new".
        String dataFileName;
        // Per-table translated index roots, indexed in step with
        // schemaManager.getAllTables(); null entries for non-CACHED tables.
        int[][] rootsList;
        Database database;
        DataFileCache cache;
        // cache.cacheFileScale — row pointers are stored as (file offset / scale).
        int scale;
        // Transaction-id list keyed by old row positions; values are rewritten to the
        // new positions as each table is converted (see setTransactionRowLookups).
        DoubleIntIndex transactionRowLookup;

        /**
         * Prepares a defrag run for the given database's .data file.
         *
         * @param db           the database whose data file is defragged
         * @param cache        the cache backing the current .data file; supplies the
         *                     file scale and the row output template
         * @param dataFileName path of the current .data file; output goes to
         *                     dataFileName + ".new"
         */
        public DataFileDefrag(Database db, DataFileCache cache, String dataFileName)
        {

            this.database = db;
            this.cache = cache;
            this.scale = cache.cacheFileScale;
            this.dataFileName = dataFileName;
        }

        /**
         * Runs the defrag: writes every CACHED table's rows to a new file named
         * dataFileName + ".new", collecting the translated index roots in rootsList,
         * then records the end-of-file position and header flags in the new file.
         *
         * On any failure the partially written ".new" file is removed and the error
         * is rethrown wrapped (FILE_IO_ERROR / OUT_OF_MEMORY / GENERAL_ERROR).
         * Callers apply the results afterwards via updateTableIndexRoots() and
         * updateTransactionRowIDs().
         */
        public void process()
        {

            // set only after everything (data, EOF position, flags) has been written;
            // the finally block deletes the ".new" file when this is still false
            bool complete = false;

            Error.printSystemOut("Defrag process begins");

            transactionRowLookup = database.txManager.getTransactionIDList();

            Error.printSystemOut("transaction count: "
                                 + transactionRowLookup.size());

            EfzArrayList allTables = database.schemaManager.getAllTables();

            rootsList = new int[allTables.size()][];

            Storage dest = null;

            try
            {
                Stream fos =
                    database.logger.getFileAccess().OpenOutputStreamElement(
                        dataFileName + ".new");
#if !SILVERLIGHT
                // 4 KB buffer; Silverlight lacks BufferedStream so the raw stream is used
                fileStreamOut = new BufferedStream(fos, 1 << 12);
#else
                fileStreamOut = fos;
#endif

                // reserve the file header area with zero bytes; the real header
                // fields are written below via the ScaledRAFile after the data pass
                for (int i = 0; i < DataFileCache.INITIAL_FREE_POS; i++)
                {
                    fileStreamOut.WriteByte(0);
                }

                fileOffset = DataFileCache.INITIAL_FREE_POS;

                // write each CACHED table's rows; other table types have no disk image
                for (int i = 0, tSize = allTables.size(); i < tSize; i++)
                {
                    Table t = (Table)allTables.get(i);

                    if (t.getTableType() == TableBase.CACHED_TABLE)
                    {
                        int[] rootsArray = writeTableToDataFile(t);

                        rootsList[i] = rootsArray;
                    }
                    else
                    {
                        rootsList[i] = null;
                    }

                    Error.printSystemOut("table: " + t.getName().name
                                         + " complete");
                }

                fileStreamOut.Flush();
                fileStreamOut.Close();

                // nulled so the finally block does not close it a second time
                fileStreamOut = null;

                // write out the end of file position
                dest = ScaledRAFile.newScaledRAFile(
                    database, dataFileName
                    + ".new", false, ScaledRAFile.DATA_FILE_RAF);

                dest.seek(DataFileCache.LONG_FREE_POS_POS);
                dest.writeLong(fileOffset);

                // set shadowed flag;
                int flags = 0;

                if (database.logger.propIncrementBackup)
                {
                    flags = BitMap.set(flags, DataFileCache.FLAG_ISSHADOWED);
                }

                // FLAG_190 presumably marks the file-format version and FLAG_ISSAVED
                // a cleanly saved file — TODO confirm against DataFileCache
                flags = BitMap.set(flags, DataFileCache.FLAG_190);
                flags = BitMap.set(flags, DataFileCache.FLAG_ISSAVED);

                dest.seek(DataFileCache.FLAGS_POS);
                dest.writeInt(flags);
                dest.close();

                dest = null;

                // diagnostic dump of the translated index roots per table
                for (int i = 0, size = rootsList.Length; i < size; i++)
                {
                    int[] roots = rootsList[i];

                    if (roots != null)
                    {
                        Error.printSystemOut(
                            "roots: "
                            + StringUtil.getList(roots, ",", ""));
                    }
                }

                complete = true;
            }
            catch (IOException e)
            {
                throw Error.error(ErrorCode.FILE_IO_ERROR, e);
            }
            catch (OutOfMemoryException e)
            {
                throw Error.error(ErrorCode.OUT_OF_MEMORY, e);
            }
            catch (Exception t)
            {
                throw Error.error(ErrorCode.GENERAL_ERROR, t);
            }
            finally
            {
                try
                {
                    if (fileStreamOut != null)
                    {
                        fileStreamOut.Close();
                    }

                    if (dest != null)
                    {
                        dest.close();
                    }
                }
                catch (Exception t)
                {
                    // NOTE(review): message says "backupFile" but this is the defrag
                    // path — looks like a copy-paste from the backup routine
                    database.logger.logSevereEvent("backupFile failed", t);
                }

                // remove the partial ".new" file after any failure
                if (!complete)
                {
                    database.logger.getFileAccess().removeElement(dataFileName
                            + ".new");
                }
            }

            Error.printSystemOut("Defrag transfer complete: "
                                 + stopw.elapsedTime());
        }

        /**
         * called from outside after the complete end of defrag
         *
         * Installs the translated index roots (collected during process()) on each
         * CACHED table, switching the in-memory indexes to the new file positions.
         */
        public void updateTableIndexRoots()
        {

            EfzArrayList allTables = database.schemaManager.getAllTables();

            for (int i = 0, size = allTables.size(); i < size; i++)
            {
                Table t = (Table)allTables.get(i);

                if (t.getTableType() == TableBase.CACHED_TABLE)
                {
                    int[] rootsArray = rootsList[i];

                    t.setIndexRoots(rootsArray);
                }
            }
        }

        /**
         * called from outside after the complete end of defrag
         *
         * Hands the old-to-new row position mapping to the transaction manager so
         * in-flight transaction row ids point into the new file image.
         */
        public void updateTransactionRowIDs()
        {
            database.txManager.convertTransactionIDs(transactionRowLookup);
        }

        /**
         * Writes all rows of one CACHED table to the new data file.
         *
         * Pass 1 walks the primary index and records, for each row, its old disk
         * position against its new scaled position ((new offset) / scale) in
         * pointerLookup. Pass 2 walks again and writes each row, letting
         * row.write(...) translate any stored row pointers through the lookup.
         * Finally the table's index roots are translated the same way and
         * transaction row ids for this table are remapped.
         *
         * @param table the CACHED table to convert
         * @return the table's index roots array, translated to new positions
         * @throws HsqlException DATA_FILE_ERROR when an index root has no entry in
         *         the lookup (inconsistent data file)
         */
        public int[] writeTableToDataFile(Table table)
        {

            Session session = database.getSessionManager().getSysSession();
            PersistentStore store = session.sessionData.getRowStore(table);
            RowOutputInterface rowOut = cache.rowOut.clone();
            // presized from the primary index estimate; keys are old positions
            DoubleIntIndex pointerLookup =
                new DoubleIntIndex(table.getPrimaryIndex().sizeEstimate(store),
                                   false);
            int[] rootsArray = table.getIndexRootsArray();
            // pos tracks projected new offsets in pass 1; fileStreamOut/fileOffset
            // are only advanced in pass 2, when the rows are actually written
            long pos = fileOffset;
            int count = 0;

            pointerLookup.setKeysSearchTarget();
            Error.printSystemOut("lookup begins: " + stopw.elapsedTime());

            // all rows
            RowIterator it = table.rowIterator(store);

            // pass 1: map each old row position to its new scaled position
            for (; it.hasNext(); count++)
            {
                CachedObject row = it.getNextRow();

                pointerLookup.addUnsorted(row.getPos(), (int)(pos / scale));

                // progress log every 50000 rows
                if (count % 50000 == 0)
                {
                    Error.printSystemOut("pointer pair for row " + count + " "
                                         + row.getPos() + " " + pos);
                }

                pos += row.getStorageSize();
            }

            Error.printSystemOut("table: " + table.getName().name + " list done: "
                                 + stopw.elapsedTime());

            count = 0;
            it = table.rowIterator(store);

            // pass 2: write each row, translating embedded pointers via the lookup
            for (; it.hasNext(); count++)
            {
                CachedObject row = it.getNextRow();

                rowOut.reset();
                row.write(rowOut, pointerLookup);
                fileStreamOut.Write(rowOut.getOutputStream().getBuffer(), 0,
                                    rowOut.size());

                fileOffset += row.getStorageSize();

                if ((count) % 50000 == 0)
                {
                    Error.printSystemOut(count + " rows " + stopw.elapsedTime());
                }
            }

            // translate the index roots; -1 marks an empty index and is kept as-is
            for (int i = 0; i < rootsArray.Length; i++)
            {
                if (rootsArray[i] == -1)
                {
                    continue;
                }

                int lookupIndex =
                    pointerLookup.findFirstEqualKeyIndex(rootsArray[i]);

                // a root that is not in the lookup means the data file is inconsistent
                if (lookupIndex == -1)
                {
                    throw Error.error(ErrorCode.DATA_FILE_ERROR);
                }

                rootsArray[i] = pointerLookup.getValue(lookupIndex);
            }

            setTransactionRowLookups(pointerLookup);
            Error.printSystemOut("table: " + table.getName().name
                                 + " : table converted");

            return rootsArray;
        }

        /**
         * Returns the per-table translated index roots collected by process();
         * entries are null for tables that are not CACHED.
         */
        public int[][] getIndexRoots()
        {
            return rootsList;
        }

        /**
         * Remaps transaction row ids whose old position appears in the given
         * per-table lookup to the corresponding new position. Ids belonging to
         * other tables are left untouched (lookupIndex == -1).
         *
         * @param pointerLookup old-position -> new-position map for one table
         */
        public void setTransactionRowLookups(DoubleIntIndex pointerLookup)
        {

            for (int i = 0, size = transactionRowLookup.size(); i < size; i++)
            {
                int key = transactionRowLookup.getKey(i);
                int lookupIndex = pointerLookup.findFirstEqualKeyIndex(key);

                if (lookupIndex != -1)
                {
                    transactionRowLookup.setValue(
                        i, pointerLookup.getValue(lookupIndex));
                }
            }
        }
    }
}
