/*
 * Copyright 2009-2010 Belmont Software Services
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.belmont.backup;

import java.io.*;
import java.util.*;
import org.json.*;
import java.sql.*;

public class BackupClient implements IBackupConstants, IEventProducer {
    // Verbose/debug switch; not referenced in the code visible here.
    static final boolean DEBUG = true;

    FileDatabase db;            // local metadata database of paths/backups/entries
    DatabaseManifest dbm;       // index builder over db; rebuilt when db is set
    File configDir;             // client configuration directory (created if absent)
    FileStorage storage;        // local file store rooted under configDir/STORE_DIR
    Producer producer;          // event dispatcher used for observer notifications
    int totalRequests;          // running count of files accounted as transferred
    boolean sentFiles = false;  // true once at least one file body was sent this backup
    int requestsSent = 0;       // files sent in the most recent request batch
    String session;             // server session token obtained by the caller
    String includeFilter[];     // optional include filters; null -> use server config default
    String excludeFilter[];     // optional exclude filters; null -> use server config default

    /**
     * Installs the file database and rebuilds the manifest wrapper around it.
     *
     * @param db the file metadata database to use for subsequent backups
     */
    public void setDatabase(FileDatabase db) {
	DatabaseManifest manifest = new DatabaseManifest(db);
	this.db = db;
	this.dbm = manifest;
    }

    /**
     * Registers an observer for events in the given range; delegates to the
     * shared Producer instance.
     */
    public void addObserver(IEventObserver observer, int start, int end) {
	producer.addObserver(observer, start, end);
    }
    
    /** Unregisters a previously added event observer. */
    public void removeObserver(IEventObserver observer) {
	producer.removeObserver(observer);
    }

    /**
     * Sets the server session token used by subsequent server calls.
     * Callers obtain it from IBackupServer.connect (see main below).
     */
    public void setSession(String session) {
	this.session = session;
    }

    /**
     * Creates a backup client rooted at the given configuration directory.
     * The directory is created if it does not already exist, and the local
     * file store is opened under its STORE_DIR subdirectory.
     *
     * @param configDir directory holding client configuration and file store
     * @throws IllegalStateException if the directory cannot be created
     */
    public BackupClient(File configDir) {
	this.configDir = configDir;
	// Previously the mkdirs() return value was ignored; a failed creation
	// would surface later as an obscure FileStorage error. Fail fast here.
	if (!configDir.isDirectory() && !configDir.mkdirs()) {
	    throw new IllegalStateException("Unable to create config directory "+configDir);
	}
	producer = Producer.getInstance();
	this.storage = FileStorage.getStorage(new File(configDir, STORE_DIR).getAbsolutePath());
    }	

    /**
     * Builds a JSON request describing up to <code>amount</code> backup
     * entries read from the result set. Each entry carries its path,
     * checksum, size and modification time (columns 1-4); the aggregate
     * byte count of the batch is stored under "size".
     *
     * @return the request object, or null if the result set was already
     *         exhausted before any row was read
     */
    JSONObject makeFileRequests(ResultSet rs, int amount) throws SQLException, JSONException {
	JSONObject request = null;
	long bytes = 0;

	for (int remaining = amount; rs.next(); ) {
	    if (request == null) {
		request = new JSONObject();
	    }

	    // XXX: create a pool of these so we're not allocating so
	    // many objects
	    JSONObject entry = new JSONObject();
	    entry.put("path", rs.getString(1));
	    entry.put("checksum", rs.getString(2));
	    long size = rs.getLong(3);
	    entry.put("size", size);
	    entry.put("modified", rs.getLong(4));
	    bytes += size;

	    request.append("files", entry);

	    if (--remaining == 0) {
		break;
	    }
	}

	if (request != null) {
	    request.put("size", bytes);
	}
	return request;
    }

    /**
     * Iterate over all the tasks in the database. Each task contains
     * the following properties: 
     * 
     * - task.type - either "backup" or "restore"
     * - task.lastRun - date at which task was last run or 0 if never
     *                  run 
     * - task.includeFilters - include filters to use
     * - task.excludeFilters - exclude filters to use
     * - task.sourceDir - directory to backup
     * - task.nextRun - offset in hours of next scheduled run
     * - task.status - status of last run "done", "failed", "pending"
     *
     * For each task we run backup method with the given
     * include/exclude filters (or use the default ones if not
     * given). 
     *
     */
    public void processScheduledBackups() throws IOException, InterruptedException, SQLException {
	ResultSet rs = db.getTasks();
	try {
	    while (rs.next()) {
		int id = rs.getInt(1);
		String name = rs.getString(2);
		String type = db.getDBProperty(id, "task", "type");
		String dir = db.getDBProperty(id, "task", "sourceDir");
		String includeFilters = db.getDBProperty(id, "task", "includeFilters");
		String excludeFilters = db.getDBProperty(id, "task", "excludeFilters");
		String lastRun = db.getDBProperty(id, "task", "lastRun");
		String nextRun = db.getDBProperty(id, "task", "nextRun");
		String status = db.getDBProperty(id, "task", "status");

		// TODO: task execution is not implemented yet -- the
		// properties read above are currently unused. The javadoc
		// describes the intended behavior (schedule check, then
		// backup() with the task's filters or the defaults).
	    }
	} finally {
	    rs.close();
	}
    }

    /**
     * Initiate or continue a backup of a source directory.
     *  - Delete the backup record with pathid for <sourceDir>
     *    and null endtime (first delete the backupentries that
     * 	  reference it).
     * - Create a new backup record with pathid referencing
     *   <sourceDir> and the current start time.
     * - Create a DatabaseManifest of the given source dir with
     *   the new backup record id passed in. For each file
     *   we can optimize checksum scan by comparing the size
     *   and mod date of each backupfile.
     * - While indexing when the class sends an event that a new
     *   backupentry has been created (sending its integer database
     *   id as the argument), an event is sent to a thread that
     *   is sending files to the backup server.
     *
     * @param sourceDir directory to back up
     * @param server destination backup server
     * @return true if the backup committed (or was cleanly cancelled),
     *         false on server error or partial failure
     */
    public synchronized boolean backup(File sourceDir, IBackupServer server) throws IOException, InterruptedException, SQLException {
	String abspath = sourceDir.getAbsolutePath();
	// Clear any stale cancel flag left over from a previous run on this path.
	Utils.clearCancel(abspath);

	// Lazily open the configuration database on first use.
	if (db == null) {
	    db = new FileDatabase();
	    db.init(new File(configDir, CONFIG_DB_NAME).getAbsolutePath());
	    dbm = new DatabaseManifest(db);
	}

	int pathid = db.addPath(abspath);
	Status status = Status.getInstance();
	status.reset();
	producer.sendNotify(this, NOTIFY_BACKUP_START, abspath);
	Utils.log(LOG_INFO, "Starting backup "+abspath);
	// Delete any previous backups that didn't complete.
	db.deletePendingBackups(pathid);
	totalRequests = 0;
	sentFiles = false;
	try {
	    Config config = server.getConfig();
	    JSONObject reply = server.startBackup(session, abspath);
	    String backupid = reply.optString("backupid");
	    
	    //producer.sendNotify(this, NOTIFY_SCANNING_START, abspath);
	    status.setCurrentFile(abspath);
	    // Explicit filters (set on this client) win; otherwise fall back
	    // to the server config's defaults.
	    if (includeFilter == null && excludeFilter == null) {
		dbm.setFilters(config.getDefaultIncludeFilter(), config.getDefaultExcludeFilter());
	    } else {
		dbm.setFilters(includeFilter, excludeFilter);
	    }
	    int total = dbm.createIndex(backupid, "BackupClient", null, sourceDir, storage);
	    //producer.sendNotify(this, NOTIFY_SCANNING_STOP, abspath);

	    JSONObject map;
	    int sent = 0;
	    boolean partialFailure = false;
	    ResultSet rs = db.getBackupEntries(backupid);
	    JSONObject fileRequest = null;
	    long totalSize = db.getMaxSize(backupid);

	    //producer.sendNotify(this, NOTIFY_TRANSFER_SIZE, totalSize);
	    //producer.sendNotify(this, NOTIFY_FILE_COUNT_CHANGED, new Integer(total));
	    status.setAmountToTransfer(totalSize);
	    status.setFilesToTransfer(total);

	    try {
		// Batch the index entries 10 at a time; the server replies
		// with the subset it actually needs uploaded ("requests").
		while ((fileRequest = makeFileRequests(rs, 10)) != null) {
		    map = server.checkFiles(session, backupid, abspath, fileRequest);

		    if (map == null) {
			Utils.log(LOG_ERROR, "backup failed (server returned null map) "+abspath);
			producer.sendNotify(this, NOTIFY_BACKUP_FAILED, abspath);
			return false;
		    }

		    JSONArray fs = fileRequest.optJSONArray("files");
		    JSONArray files = map.optJSONArray("requests");

		    // No "requests" array: server already has every file in
		    // this batch, so just account for them as transferred.
		    if (files == null) {
			if (fs != null) {
			    totalRequests += fs.length();
			    status.setFilesTransferred(totalRequests);
			    //producer.sendNotify(this, NOTIFY_COMPLETED_FILE_COUNT_CHANGED,new Integer(totalRequests));
			}
			continue;
		    }

		    requestsSent = processFileRequests(abspath, server, files);

		    if (requestsSent < files.length()) {
			Utils.log(LOG_WARNING, "PARTIAL FAILURE: "+requestsSent+" "+files.length());
			partialFailure = true;
		    }

		    if (fs != null) {
			// Count the batch minus the uploads that failed.
			totalRequests += fs.length() - (files.length() - requestsSent);
			//producer.sendNotify(this, NOTIFY_COMPLETED_FILE_COUNT_CHANGED, new Integer(totalRequests));
			status.setFilesTransferred(totalRequests);
		    }
		}
	    } finally {
		rs.close();
	    }

	    if (partialFailure) {
		// NOTE(review): on partial failure the pending backup record
		// is left in place (neither committed nor cancelled) -- it is
		// cleaned up by deletePendingBackups on the next run.
		producer.sendNotify(this, NOTIFY_PARTIAL_BACKUP, abspath);
		return false;
	    } else {
		producer.sendNotify(this, NOTIFY_TRANSFER_COMPLETE, null);
		// NOTE(review): "true ||" deliberately forces the commit path,
		// making the else branch below unreachable; see the XXX comment
		// there for why cancelling on "no files sent" is undesirable.
		if (true || sentFiles) {
		    server.commitBackup(session, backupid, abspath);
		    db.setBackupStatus(backupid, BACKUP_STATUS_DONE, new java.sql.Date(System.currentTimeMillis()));
		    Utils.log(LOG_INFO, "backup completed "+abspath+" "+((sentFiles) ? "" :"no files sent"));
		} else {
		    // XXX: this has the effect that the latest date
		    // of the backup is never updated so, while
		    // correct, it always seems the latest backup is
		    // fixed at the time of the last change. Instead
		    // of cancel, we'd like to update the end time and
		    // start times on the last backup or else delete
		    // the previous backup and commit this one.

		    server.cancelBackup(session, abspath);
		    db.deletePendingBackups(pathid);
		    Utils.log(LOG_INFO, "backup completed (no files sent)"+abspath);
		}
		producer.sendNotify(this, NOTIFY_BACKUP_DONE, abspath);
		return true;
	    }
	} catch (JSONException ex) {
	    // Wrapped as IOException to keep the throws clause narrow; the
	    // original message text is preserved in the new exception.
	    producer.sendNotify(this, NOTIFY_EXCEPTION, ex);
	    Utils.log(LOG_ERROR, "backup exception "+abspath, ex);
	    throw new IOException(ex.toString());
	} catch (IOException ex) {
	    producer.sendNotify(this, NOTIFY_EXCEPTION, ex);
	    Utils.log(LOG_ERROR, "backup exception "+abspath, ex);
	    throw ex;
	}
    }

    /**
     * Sends the files named in <code>requests</code> to the backup server.
     * Per-file failures are logged and reported as events but do not abort
     * the remaining transfers.
     *
     * @param abspath  absolute path of the directory being backed up (used
     *                 for cancel checks)
     * @param server   destination backup server
     * @param requests server-provided array of file request objects
     * @return the number of requests completed successfully
     * @throws InterruptedException if the backup was cancelled
     */
    int processFileRequests(String abspath, IBackupServer server, JSONArray requests) throws InterruptedException {
	if (requests == null || requests.length() == 0) {
	    //producer.sendNotify(this, NOTIFY_TRANSFER_SIZE, new Long(0));
	    return 0;
	}

	requestsSent = 0;
	int l = requests.length();
	Status status = Status.getInstance();
	for (int i = 0; i < l; i++) {
	    JSONObject request = (JSONObject)requests.opt(i);
	    String name = FileRequest.getName(request);
	    String digest = FileRequest.getDigest(request);

	    try {
		//producer.sendNotify(this, NOTIFY_START_FILE, name);
		status.setCurrentFile(name);
		SFile sf = storage.getFile(digest, false);
		if (sf == null) {
		    producer.sendNotify(this, NOTIFY_FILE_ERROR, name);
		    Utils.log(LOG_ERROR,"File not found: "+digest);
		    continue;
		}
		Utils.checkCancel(abspath);
		sendFile(sf, digest, name, FileRequest.getOffset(request), server);
		requestsSent++;
	    } catch (InterruptedException ex) {
		// BUGFIX: cancellation must propagate. Previously the
		// catch (Throwable) below swallowed InterruptedException,
		// so a cancel request could not stop the transfer loop.
		throw ex;
	    } catch (IOException ex) {
		Utils.log(LOG_ERROR, "Exception in processFileRequests ", ex);
		producer.sendNotify(this, NOTIFY_EXCEPTION, ex);
		producer.sendNotify(this, NOTIFY_FILE_ERROR, name);
	    } catch (Throwable ex) {
		// Defensive catch-all so one bad request cannot abort the
		// batch; log it instead of dumping to stderr.
		Utils.log(LOG_ERROR, "Exception in processFileRequests ", ex);
	    }
	}

	return requestsSent;
    }

    /**
     * Streams one stored file to the backup server starting at the given
     * offset, closing the stream when done. On success, records that at
     * least one file body went over the wire (sentFiles).
     */
    void sendFile(SFile sf, String digest,
		  String name, long offset, IBackupServer server) throws IOException, InterruptedException {
	InputStream in = sf.getInputStream(offset);
	if (in == null) {
	    // No readable data for this entry; report and skip.
	    producer.sendNotify(this, NOTIFY_FILE_ERROR, name);
	    return;
	}
	try {
	    server.sendFile(session, digest, name, sf.getAvailableData(), offset, in);
	    //producer.sendNotify(this, NOTIFY_FILE_DONE, digest);
	    Utils.log(LOG_INFO, "Sent file "+name+" digest: "+digest+" offset: "+offset);
	    sentFiles = true;
	} finally {
	    in.close();
	}
    }

    /**
     * Command-line entry point: backs up args[0] using args[1] as the client
     * configuration directory and args[2] as the root of a file-based server.
     */
    public static void main(String args[]) throws Exception {
	// BUGFIX: previously missing arguments caused a raw
	// ArrayIndexOutOfBoundsException instead of a usage message.
	if (args.length < 3) {
	    System.err.println("Usage: BackupClient <sourceDir> <configDir> <serverDir>");
	    System.exit(1);
	}
	BackupClient client = new BackupClient(new File(args[1]));
	FileBackupServer server = new FileBackupServer(new File(args[2]));
	String session = server.connect(BACKUP_PROTOCOL_VERSION, "test.backupclient", "localhost",
					System.getProperty("os.name"),
					System.getProperty("os.version"),
					System.getProperty("user.name"));

	client.setSession(session);
	try {
	    if (client.backup(new File(args[0]), server)) {
		System.out.println("Backup succeeded.");
	    } else {
		System.out.println("Backup failed.");
	    }
	} finally {
	    // Always release the session, even if backup throws.
	    server.disconnect(session);
	}
	System.exit(0);
    }
}