package randomservice;

import java.sql.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Synchronizes the in-memory per-user quotas held by a {@link TestTaskImpl} with a
 * PostgreSQL table ({@code quotas}). On startup it loads the whole table, then a
 * single-threaded scheduler periodically runs a three-phase merge:
 * base-changed rows, locally-created rows (INSERT), locally-changed rows (UPDATE).
 *
 * Thread-safety: all JDBC batch work happens on the single scheduler thread;
 * access to {@link #connection} during commits is guarded by synchronizing on it.
 */
public class TestTaskPersistence {

    // Snapshot of the local quota values as of the last successful synchronization.
    Map<Integer, Long> oldLocals = new HashMap<Integer, Long>();
    // Snapshot of the database ("base") quota values as of the last successful synchronization.
    Map<Integer, Long> oldPersistent = new HashMap<Integer, Long>();

    // The in-memory quota holder this instance keeps in sync with the database.
    TestTaskImpl local;
    Connection connection;

    // Database clock value at the last synchronization; only rows whose change_time is
    // newer than this need to be pulled on the next pass.
    Timestamp oldDate;

    // Released once the final (post-destroy) full synchronization has completed;
    // destroy() blocks on it so shutdown never loses local changes.
    volatile CountDownLatch destroyCountDown = new CountDownLatch(1);

    final static int TIME_IN_MILLS = 3000;
    final static String DELETE_ALL = "DELETE from quotas";
    final static String SELECT_ALL = "SELECT user_id, quota FROM quotas";
    final static String INSERT = "INSERT into quotas (user_id, quota, change_time) values (?, ?, clock_timestamp())";

    private final static String UPDATE = "UPDATE quotas set quota = quota + ?, change_time = clock_timestamp()  where user_id=?";
    private final static String SELECT_WITH_FILTER = "SELECT user_id, quota FROM quotas where user_id in (";
    private final static String SELECT_BY_CHANGE_TIME = SELECT_ALL + " where change_time > ?";
    private final static String GET_BASE_TIME = "select clock_timestamp()";

    // PostgreSQL SQLState for unique_violation: another instance inserted the row first.
    private final static String UNIQUE_VIOLATION_SQLSTATE = "23505";

    // Connection settings. Overridable via system properties so credentials need not
    // live in source; defaults preserve the original behavior.
    private final static String DB_URL =
            System.getProperty("quotas.db.url", "jdbc:postgresql://192.241.172.238:5432/gifts");
    private final static String DB_USER = System.getProperty("quotas.db.user", "batman");
    private final static String DB_PASSWORD = System.getProperty("quotas.db.password", "r10bYyXL9chHg2uorwKM");

    // Used to read/write database timestamps in UTC. Calendar is not thread-safe;
    // after startup it is only touched from the scheduler thread.
    private final static Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"));

    final private ScheduledExecutorService synchronizeExecutor = Executors.newSingleThreadScheduledExecutor();

    /**
     * Sets the TestTaskImpl, loads the initial data from the base and starts the
     * synchronization process.
     *
     * @param local the in-memory quota holder to synchronize
     * @param debug if true - it's a test and no automatic synchronization is required
     * @throws RuntimeException if the driver is missing or the initial load fails
     */
    public void setTestTask(final TestTaskImpl local, boolean debug) {
        this.local = local;
        try {
            Class.forName("org.postgresql.Driver");
        } catch (ClassNotFoundException e) {
            throw new RuntimeException("PostgreSQL JDBC driver not on classpath", e);
        }

        try {
            connection = DriverManager.getConnection(DB_URL, DB_USER, DB_PASSWORD);

            // Remember the base clock BEFORE the initial load so no change can slip
            // between the snapshot and the first incremental pass.
            oldDate = fetchBaseTime();

            // Full-table load: seed the in-memory quotas and both snapshots.
            iterateOverBase(new UseBaseData() {
                @Override
                public void useData(int userId, long quota) {
                    local.getUserQuotas().put(userId, new AtomicLong(quota));
                    oldLocals.put(userId, quota);
                    oldPersistent.put(userId, quota);
                }
            }, connection.prepareStatement(SELECT_ALL));

        } catch (SQLException e) {
            throw new RuntimeException(e);
        }

        if (!debug) {
            // schedule a regular synchronization
            synchronizeExecutor.scheduleAtFixedRate(new Runnable() {

                // Set one tick AFTER local.destroy is observed, so the tick that saw the
                // flag still performs one last FULL synchronization before shutting down.
                boolean destroy = false;

                @Override
                public void run() {
                    if (destroy || local.clearAll) {
                        return;
                    }
                    destroy = local.destroy;
                    try {
                        synchronize();
                    } catch (SQLException e) {
                        // NOTE(review): throwing from a scheduled task cancels all further
                        // runs and the exception is swallowed by the executor; if this
                        // happens while destroy is pending, destroy() will block forever.
                        // Kept for behavioral compatibility — consider logging + retry.
                        throw new RuntimeException(e);
                    }
                    if (destroy) {
                        destroyCountDown.countDown();
                        synchronizeExecutor.shutdown();
                    }
                }
            }, 0, TIME_IN_MILLS, TimeUnit.MILLISECONDS);
        }
    }

    /**
     * Blocks until the final post-destroy synchronization has run.
     */
    void destroy() {
        try {
            destroyCountDown.await(); // wait for the last synchronization
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt status
            throw new RuntimeException(e);
        }
    }

    /**
     * Reads the current database clock. Used to delimit synchronization windows so
     * that change_time comparisons use the base's clock, not this JVM's.
     */
    private Timestamp fetchBaseTime() throws SQLException {
        synchronized (connection) {
            PreparedStatement statement = connection.prepareStatement(GET_BASE_TIME);
            try {
                ResultSet resultSet = statement.executeQuery();
                if (!resultSet.next()) {
                    throw new SQLException("clock_timestamp() returned no rows");
                }
                return resultSet.getTimestamp(1, calendar);
            } finally {
                statement.close(); // also closes the result set
            }
        }
    }

    /**
     * Executes the given query and feeds every (user_id, quota) row to the callback.
     * Always closes the statement (and thereby its result set) when done.
     */
    private void iterateOverBase(UseBaseData useData, PreparedStatement preparedStatement) {
        try {
            try {
                ResultSet resultSet = preparedStatement.executeQuery();
                while (resultSet.next()) {
                    useData.useData(resultSet.getInt("user_id"), resultSet.getLong("quota"));
                }
            } finally {
                preparedStatement.close();
            }
        } catch (SQLException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * One synchronization pass:
     * 1. merge rows changed in the base since the last pass (UPDATEs);
     * 2. INSERT users created locally since the last pass, retrying via step 1
     *    whenever a concurrent instance wins the insert race;
     * 3. merge users changed locally that already exist, unchanged, in the base.
     */
    final void synchronize() throws SQLException {

        // First retrieve and process all users whose quotas changed in the base
        // since the last synchronization
        final Set<Integer> processedUsers = updateUsingBaseNews();             // UPDATES

        // then get all locally-changed users
        Set<Integer> toProcess = local.getChangedUsersAndClear();

        // drop users already handled by updateUsingBaseNews
        toProcess.removeAll(processedUsers);

        Set<Integer> alreadyInBase = null;
        while (alreadyInBase == null) {
            try {
                alreadyInBase = updateUsingLocalNews(toProcess);               // INSERTS
            } catch (SQLException e) {
                if (isUniqueViolation(e)) {
                    // Some other instance has added such a user: re-read base news
                    // (the conflicting row IS base news) and try the inserts again.
                    toProcess.removeAll(updateUsingBaseNews());   // UPDATES
                } else {
                    throw e;
                }
            }
        }

        // finally update all users which are unchanged in the base but already there
        // and changed locally
        updateUsingBaseNews(alreadyInBase);

    }

    /**
     * True when the (possibly chained) failure is a unique-constraint violation.
     * Null-safe replacement for the former
     * {@code e.getNextException().getMessage().contains("constraintname")} check,
     * which NPE'd on a null chain and matched a placeholder string that real
     * PostgreSQL messages never contain.
     */
    private static boolean isUniqueViolation(SQLException e) {
        for (SQLException cause = e; cause != null; cause = cause.getNextException()) {
            if (UNIQUE_VIOLATION_SQLSTATE.equals(cause.getSQLState())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Batch-inserts every user from {@code usersToUpdate} not yet known to exist
     * in the base, then records their values in both snapshots.
     *
     * @return the users that were skipped because they already exist in the base
     * @throws SQLException notably on a unique violation when another instance
     *         inserted one of the users concurrently; the transaction is rolled back
     */
    private Set<Integer> updateUsingLocalNews(final Set<Integer> usersToUpdate) throws SQLException {

        Set<Integer> usersWhichAlreadyInBase = new HashSet<Integer>();
        Map<Integer, Long> toInsert = new HashMap<Integer, Long>();
        PreparedStatement insert = connection.prepareStatement(INSERT);
        try {
            for (Integer user : usersToUpdate) {
                if (oldPersistent.containsKey(user)) {
                    usersWhichAlreadyInBase.add(user);
                } else {
                    insert.setInt(1, user);
                    AtomicLong atomicNowLocal = local.getUserQuota(user, 10);
                    long nowLocal = atomicNowLocal.get();
                    insert.setLong(2, nowLocal);
                    toInsert.put(user, nowLocal);
                    insert.addBatch();
                }
            }
            synchronized (connection) {
                connection.setAutoCommit(false);
                try {
                    insert.executeBatch();
                    connection.commit();
                } catch (SQLException e) {
                    connection.rollback();
                    throw e;
                } finally {
                    // restore even on failure; previously a failed batch left the
                    // connection stuck in manual-commit mode
                    connection.setAutoCommit(true);
                }
            }
        } finally {
            insert.close();
        }
        // The inserts committed: the base now holds exactly the local values,
        // so both snapshots agree.
        for (Map.Entry<Integer, Long> user : toInsert.entrySet()) {
            oldPersistent.put(user.getKey(), user.getValue());
            oldLocals.put(user.getKey(), user.getValue());
        }

        return usersWhichAlreadyInBase;
    }

    /** Incremental pass over every row changed in the base since the last pass. */
    private Set<Integer> updateUsingBaseNews() throws SQLException {
        return updateUsingBaseNews(null);
    }

    /**
     * Merges base rows into the local state and batches compensating UPDATEs back.
     *
     * @param filters when non-null, process exactly these users regardless of
     *        change_time; when null, process everything changed since {@link #oldDate}
     * @return the users processed by this pass
     */
    private Set<Integer> updateUsingBaseNews(Set<Integer> filters) throws SQLException {
        final Set<Integer> processedUsers = new HashSet<Integer>();

        // Advance the window first: anything changed before this instant is
        // handled by this pass, anything after it by the next one.
        Timestamp oldOldDate = oldDate;
        oldDate = fetchBaseTime();

        PreparedStatement select;

        if (filters != null) {   // if we want some specific users to be processed
            if (filters.isEmpty()) {
                return processedUsers;
            }

            // Safe to build by concatenation: the IN-list items are ints, not strings.
            StringBuilder filtersStringBuilder = new StringBuilder(SELECT_WITH_FILTER);
            boolean comma = false;
            for (Integer id : filters) {
                if (comma) {
                    filtersStringBuilder.append(",");
                }
                filtersStringBuilder.append(id);
                comma = true;
            }
            filtersStringBuilder.append(")");
            select = connection.prepareStatement(filtersStringBuilder.toString());

        } else {
            select = connection.prepareStatement(SELECT_BY_CHANGE_TIME);
            select.setTimestamp(1, oldOldDate, calendar);
        }

        //http://www.postgresql.org/docs/8.2/static/explicit-locking.html
        final PreparedStatement preparedStatement = connection.prepareStatement(UPDATE);
        try {
            // iterateOverBase closes 'select' for us
            iterateOverBase(new UseBaseData() {
                @Override
                public void useData(int userId, long quota) {
                    updateQuotas(userId, quota, preparedStatement, processedUsers);
                }
            }, select);

            synchronized (connection) {
                connection.setAutoCommit(false);
                try {
                    preparedStatement.executeBatch();
                    connection.commit();
                } catch (SQLException e) {
                    connection.rollback();
                    throw e;
                } finally {
                    // restore even on failure (see updateUsingLocalNews)
                    connection.setAutoCommit(true);
                }
            }
        } finally {
            preparedStatement.close();
        }
        return processedUsers;
    }

    /**
     * Reconciles one user: applies the base's delta locally, queues the local delta
     * for the base, and rolls both snapshots forward.
     *
     * The delta bookkeeping is order-sensitive; statements are kept exactly as in
     * the original. distributed_creation subtracts the START_QUOTA that both sides
     * granted independently for a user neither side had seen before, so it is not
     * counted twice.
     */
    private void updateQuotas(int userId, long quota, PreparedStatement preparedStatement, Set<Integer> processedUsers) {
        AtomicLong atomicNowLocal = local.getUserQuota(userId, 10);

        long nowLocal = atomicNowLocal.get();
        long nowBase = quota;

        int distributed_creation = 0;
        if (!oldPersistent.containsKey(userId) && !oldLocals.containsKey(userId)) {
            distributed_creation = TestTaskImpl.START_QUOTA;
        }

        Long oldPersistentBuffer = oldPersistent.get(userId);
        long oldBase = (oldPersistentBuffer == null) ? 0 : oldPersistentBuffer;

        Long oldLocalBuffer = oldLocals.get(userId);
        long oldLocal = (oldLocalBuffer == null) ? 0 : oldLocalBuffer;

        // apply the base-side delta to the live local counter
        local.addQuota(nowBase - oldBase - distributed_creation, atomicNowLocal);
        try {
            // queue the local-side delta for the base (batched, committed by the caller)
            updateBaseNow(userId, nowLocal - oldLocal - distributed_creation, preparedStatement);
        } catch (SQLException e) {
            throw new RuntimeException(e);
        }

        // roll the snapshots forward to the post-merge values
        long oldOldLocal = oldLocal;
        oldLocal = nowLocal + (oldBase - nowBase);
        oldBase = nowBase + (oldOldLocal - nowBase);

        oldLocals.put(userId, oldLocal);
        oldPersistent.put(userId, oldBase);

        processedUsers.add(userId);
    }

    /** Adds one compensating UPDATE (quota += delta) to the batch. */
    private void updateBaseNow(int userId, long delta, PreparedStatement preparedStatement) throws SQLException {
        preparedStatement.setLong(1, delta);
        preparedStatement.setInt(2, userId);
        preparedStatement.addBatch();
    }

    /** Deletes every row from the quotas table. Test helper. */
    public void clear() {
        try {
            PreparedStatement delete = connection.prepareStatement(DELETE_ALL);
            try {
                delete.executeUpdate();
            } finally {
                delete.close();
            }
        } catch (SQLException e) {
            throw new RuntimeException(e);
        }
    }


    /** Callback receiving one (user_id, quota) row from the base. */
    interface UseBaseData {
        void useData(int userId, long quota);
    }

}
