package edu.harvard.s3xyback;

import com.amazonaws.auth.PropertiesCredentials;
import com.amazonaws.services.s3.AmazonS3EncryptionClient;
import com.amazonaws.services.s3.model.*;
import org.apache.commons.codec.digest.DigestUtils;
import org.joda.time.Days;
import org.joda.time.LocalDate;

import javax.crypto.SecretKey;
import java.io.*;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.logging.FileHandler;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.logging.SimpleFormatter;

public class S3xyBack {

    // How frequently reports should be generated
    enum ReportSchedule {DAILY, WEEKLY, MONTHLY}

    /**
     * Entry point. Loads the user's configuration, obtains (or generates) the
     * AES encryption key, ensures every configured directory has a matching S3
     * bucket, then synchronizes: uploads new files, detects moves/renames and
     * deletions (optionally auto-restoring deletions), and periodically
     * re-verifies MD5 checksums of backed-up files.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if configuration is invalid, I/O fails, or a backed-up
     *                   file is found to be corrupted
     */
    public static void main(String[] args) throws Exception {

        // TODO: create an AWS account for new users upon installation and send them their credentials

        // Directory path for which config file, log file, and AES key and salt are stored
        // Example: C:\\Users\\ayoung01\\Dropbox
        String userPath = "";

        // The path at which the user's configuration file is stored
        // TODO: standardize config file location in the installation directory
        String configPath = userPath + "\\my_properties.properties";

        // Load configuration file. Properties.load does not close the stream,
        // so close it explicitly to avoid leaking the file handle.
        Properties configFile = new Properties();
        FileInputStream configIn = new FileInputStream(configPath);
        try {
            configFile.load(configIn);
        } finally {
            configIn.close();
        }

        // TODO: Check the version of the config file format to detect incompatible upgrades

        // PARAMETERS READ FROM CONFIG FILE:

        // Contains the directories the user wishes to upload
        String[] dirs = configFile.getProperty("directories").split(", ");

        // How frequently, in days, should a given file be checked for corruption
        int checkDays = Integer.parseInt(configFile.getProperty("checkDays"));

        // How frequently reports should be generated
        // (validated here via valueOf; not yet consumed -- see report TODOs below)
        ReportSchedule reportSched = ReportSchedule.valueOf(configFile.getProperty("reportSched"));

        // Whether or not the user chooses to configure auto-restore
        boolean autoRestore = Boolean.parseBoolean(configFile.getProperty("autoRestore"));

        // Location of the log file generated
        // TODO: should later be standardized in the installation directory
        String logFilePath = userPath + "\\MyLogFile.log";

        // Stores AES key and salt file into the first configured directory
        // TODO: should later have these standardized in the installation directory
        String aesKeyFile  = userPath + "\\.aws_key";
        String aesSaltFile = userPath + "\\.aws_salt";

        // The user will configure a passphrase, from which we will generate an encryption key and salt.
        // This should only occur once, when the user first runs the program. The key should be preserved
        // locally, for subsequent use.

        // Check for an existing encryption key; if none, generate one from a
        // passphrase prompted from the user, then read it back from disk.
        SecretKey key;
        File keyFile = new File(aesKeyFile);
        if (keyFile.exists()) {
            key = AESKeyFactory.readKey(keyFile);
        } else {
            PasswordInput pass = new PasswordInput();
            String awsPassphrase = pass.getPassword("Please provide a password: ");
            AESKeyFactory.generateKey(awsPassphrase, aesKeyFile, aesSaltFile);
            key = AESKeyFactory.readKey(keyFile);
        }

        // Connect to AWS with client-side encryption using the user's key.
        // TODO (future): support multi-tenancy scenario with dual-encrypted envelope keys
        // AWS provides encryption with its API (using our keys), however, multiple keys were not supported last time I
        // looked. This prevents us from decrypting all users' files in the multi-tenancy scenario. If we did the encryption
        // ourselves, we could create multiple "envelope keys" so that multiple parties could decrypt the same data
        // without sharing keys.
        AmazonS3EncryptionClient s3 = new AmazonS3EncryptionClient(new PropertiesCredentials(new File(configPath)), new EncryptionMaterials(key));

        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        // AWS storage is organized into flat buckets (not directories). We will generate a random string for
        // each new directory. That string will be stored in that directory within a .aws_bucket file
        // Note: the use of a random string is just to get started, it would probably be good to utilize the directory
        // name so that when we go to the AWS web console, we know which buckets match which directories
        String awsBucketFile = ".aws_bucket";
        HashMap<String, String> dirBuckets = new HashMap<String, String>();

        // Check for the existence of a bucket for each configured directory; if this directory is new
        // and a bucket has not yet been created, we will create a new bucket.
        for (String d : dirs) {
            String bucket;
            File bucketFile = new File(d + File.separatorChar + awsBucketFile);
            if (bucketFile.exists()) {
                // Read the previously-assigned bucket name; close the reader
                // even if readLine throws.
                BufferedReader in = new BufferedReader(new FileReader(bucketFile));
                try {
                    bucket = in.readLine();
                } finally {
                    in.close();
                }

                if (!s3.doesBucketExist(bucket)) // The bucket was deleted from Amazon for some reason
                    throw new Exception("Bad bucket configuration for " + d);
                    // TODO: since a user may decide to delete a bucket from Amazon, we should handle this more gracefully
                    // perhaps prompt the user to re-create the bucket?

            } else {
                // Generate the random string for the bucket name
                // TODO: replace this with a more meaningful name
                String uuid = UUID.randomUUID().toString().replaceAll("[^A-Za-z0-9]", "").toLowerCase();
                String baseName = new File(d).getName().replaceAll("[^A-Za-z0-9]", "").toLowerCase();

                bucket = baseName + uuid;

                // Write the new name to the .aws_bucket file for that directory;
                // close the writer even if write throws.
                BufferedWriter out = new BufferedWriter(new FileWriter(bucketFile));
                try {
                    out.write(bucket);
                } finally {
                    out.close();
                }

                // create the new bucket on Amazon S3
                System.out.println("Creating a bucket: " + bucket);
                s3.createBucket(bucket);
            }

            // Maps buckets and directories for later reference
            dirBuckets.put(d, bucket);
        }

        // Writes directories : buckets to a log file
        Logger logger = Logger.getLogger("MyLog");
        FileHandler fh;
        try {

            // This block configures the logger with handler and formatter
            // TODO: configure in the installation directory
            fh = new FileHandler(logFilePath, true);
            logger.addHandler(fh);
            logger.setLevel(Level.ALL);
            SimpleFormatter formatter = new SimpleFormatter();
            fh.setFormatter(formatter);

        } catch (SecurityException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
        for (String d : dirBuckets.keySet()) {
            // the following statement is used to log any messages
            logger.info(d + ": " + dirBuckets.get(d));
        }

        // Format used by Date.toString(), which is what we store in the
        // "lastchecksumdate" metadata. Date.toString() always renders day and
        // month names in English, so we must parse with an English locale or
        // parsing fails on machines with a non-English default locale.
        // Hoisted out of the loops below since it is loop-invariant.
        SimpleDateFormat sdf = new SimpleDateFormat("EEE MMM dd HH:mm:ss zzz yyyy", Locale.ENGLISH);

        // Here is the meat of the program:

        // For each configured directory:
        for (String d : dirs) {
            String bucket = dirBuckets.get(d);
            System.out.println("Iterating through " + d);

            // Create a lookup table of key : value pairs by traversing each bucket
            HashMap<String, S3ObjectSummary> map = listS3Keys(s3.listObjects(bucket));

            // Collect all file paths in an ArrayList
            List<File> localFiles = listFiles(d); // TODO: naturally, this only lists files, not nested directories, we will need to think about how we want to handle nested directories

            // Scan for new files on the user's file system (FS)
            for (File f : localFiles) {

                // If a file has no entry in S3, generate the MD5 checksum and the lastchecksum timestamp
                if (!map.containsKey(f.getAbsolutePath())) {
                    PutObjectRequest putRequest = new PutObjectRequest(bucket, f.getAbsolutePath(), f);

                    // Update object metadata
                    ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.addUserMetadata("checksum", calculateMD5(f));
                    objectMetadata.addUserMetadata("lastchecksumdate", Calendar.getInstance().getTime().toString());

                    // Store the file in the appropriate bucket on S3
                    putRequest.setMetadata(objectMetadata);
                    s3.putObject(putRequest);
                    System.out.println("Uploaded new object: " + f + " to the bucket " + bucket);
                }
            }

            // Keep track of which keys need to be removed from our lookup table later
            ArrayList<String> keysRemovedFromS3 = new ArrayList<String>();

            // Scan each file path (key) in the bucket
            for (String k : map.keySet()) {

                // Look for files that have been deleted on the user's FS
                if (!localFiles.contains(new File(k))) {
                    // Retrieve user metadata (the SDK returns a Map<String, String>)
                    Map<String, String> userMetadata = s3.getObjectMetadata(bucket, k).getUserMetadata();
                    String checksum = userMetadata.get("checksum");

                    // Boolean to keep track of whether the file has been moved/renamed
                    boolean isMoved = false;

                    // Iterate through local FS to look for the existence of a file with the same MD5 checksum
                    // as the file that is missing
                    for (File f : localFiles) {

                        // If we find another copy of the file in the same directory:
                        if (calculateMD5(f).equals(checksum)) {
                            // The user must have moved or renamed the file.
                            isMoved = true;

                            // Delete the object from the bucket and mark the key for later removal
                            s3.deleteObject(bucket, k);
                            keysRemovedFromS3.add(k);
                            System.out.println("!!!FILE " + k + " HAS BEEN MOVED/RENAMED TO: " + f.getAbsolutePath());
                            // TODO: Store in moved/renamed report
                            logger.info(k + " has been moved/renamed to " + f.getAbsolutePath());

                            // Stop scanning: the object is already deleted, and
                            // continuing would issue redundant deleteObject calls
                            // and duplicate entries in keysRemovedFromS3.
                            break;
                        }
                    }
                    if (!isMoved) {
                        // The file has been deleted on the user's FS but will be maintained in S3
                        // TODO: Let the user know that the file has been deleted by storing in report
                        System.out.println("Deletion: " + k);

                        // Auto-restore the file if it is configured:
                        // TODO: Recreate the directory if it has been deleted
                            // Currently only works if the directory still exists
                        if (autoRestore) {
                            // Fetch the file from S3 and write a new file to the correct path in the user's FS
                            File file = new File(k);
                            File parentDirectory = new File(file.getParent());
                            if (!parentDirectory.exists()) {
                                System.out.println("The indicated path does not exist!");
                                boolean success = parentDirectory.mkdirs();
                                if (success) {
                                    System.out.println("Directory " + parentDirectory.getAbsolutePath() + " created");
                                }
                            }
                            // Close both streams even if the copy fails, so we
                            // don't leak the S3 connection or the file handle.
                            InputStream input = s3.getObject(bucket, k).getObjectContent();
                            try {
                                FileOutputStream output = new FileOutputStream(file);
                                try {
                                    org.apache.commons.io.IOUtils.copy(input, output);
                                } finally {
                                    output.close();
                                }
                            } finally {
                                input.close();
                            }
                            System.out.println("File restored to: " + k);
                            logger.info("File restored to: " + k);
                        }
                    }
                    // TODO: Generate and email a report for files that have been moved, renamed, or deleted.
                        // The frequency of the reports should be configurable using reportSched
                }
            }

            // Delete keys of deleted objects from our lookup table
            for (String s : keysRemovedFromS3) {
                map.remove(s);
            }

            // Scan for files that have not been checked in <checkDays> days
            for (String k : map.keySet()) {
                Map<String, String> userMetadata = s3.getObjectMetadata(bucket, k).getUserMetadata();
                String ckd = userMetadata.get("lastchecksumdate");

                // Parse date from metadata into a java.util.Date object
                Date date = sdf.parse(ckd);

                // Calculate the number of days between the lastchecksumdate and the current time
                int daysSinceLastCheck = Days.daysBetween(LocalDate.fromDateFields(date), LocalDate.fromDateFields(Calendar.getInstance().getTime())).getDays();

                // If file has not been checked for a certain number of days, compare checksums
                if (daysSinceLastCheck > checkDays) {
                    // Files deleted locally (and not auto-restored in this run --
                    // localFiles is not refreshed after a restore) have no local
                    // counterpart to verify against; skip them rather than crash
                    // on indexOf() returning -1.
                    int localIndex = localFiles.indexOf(new File(k));
                    if (localIndex < 0) {
                        continue;
                    }

                    // Generate checksum of local file and retrieve checksum from metadata from the cloud file
                    File localFile = localFiles.get(localIndex);
                    String localMD5 = calculateMD5(localFile);
                    String cloudMD5 = userMetadata.get("checksum");

                    // If the checksum has changed, compare filesystem modified date with database record
                    if (!localMD5.equals(cloudMD5)) {
                        long localDate = localFile.lastModified();
                        long cloudDate = s3.getObjectMetadata(bucket, k).getLastModified().getTime();

                        if (localDate > cloudDate) {
                            System.out.println("New file version detected... updating file to S3");
                            PutObjectRequest putRequest = new PutObjectRequest(bucket, k, localFile);

                            // Update object metadata
                            ObjectMetadata objectMetadata = new ObjectMetadata();
                            objectMetadata.addUserMetadata("checksum", calculateMD5(localFile));
                            objectMetadata.addUserMetadata("lastchecksumdate", Calendar.getInstance().getTime().toString());

                            // Store the file in the appropriate bucket on S3
                            putRequest.setMetadata(objectMetadata);
                            s3.putObject(putRequest);
                            System.out.println("Successfully updated: " + k + " in bucket " + bucket);
                            logger.info("Successfully updated: " + k + " in bucket " + bucket);

                            // TODO: Generate a report indicating that the file has been modified.
                        } else {
                            // The local file changed but is not newer than the
                            // backup: treat it as corruption.
                            throw new Exception(k + " is corrupted");

                            // TODO: Generate some kind of error report; ask user to replace local file with backup file
                        }
                    }

                    // If checksums match, update last checked date
                    else {
                        ObjectMetadata objectMetadata = new ObjectMetadata();
                        objectMetadata.addUserMetadata("checksum", calculateMD5(localFile));
                        objectMetadata.addUserMetadata("lastchecksumdate", Calendar.getInstance().getTime().toString());

                        // It doesn't seem like we can update the file's metadata without creating a new PUT or COPY request
                        CopyObjectRequest copyObjReq = new CopyObjectRequest(bucket, k, bucket, k);
                        copyObjReq.setNewObjectMetadata(objectMetadata);
                        s3.copyObject(copyObjReq);
                    }
                }
            }
        }
    }

    /**
     * Lists all files (recursively) under the given directory path.
     *
     * @param path directory path to traverse
     * @return absolute File objects for every regular file under the path;
     *         empty if the path does not exist or is not a directory
     */
    public static List<File> listFiles(String path) {
        return listFiles(new File(path));
    }

    /**
     * Lists all files (recursively) under the given directory.
     *
     * @param path directory to traverse
     * @return absolute File objects for every regular file under the directory;
     *         empty if the path does not exist or is not a directory
     */
    public static List<File> listFiles(File path) {

        // File.listFiles() returns null (not an empty array) when the path
        // does not exist, is not a directory, or cannot be read -- guard
        // against the resulting NullPointerException.
        File[] list = path.listFiles();
        if (list == null) {
            return new ArrayList<File>();
        }

        ArrayList<File> fileList = new ArrayList<File>(list.length);

        for (File f : list) {
            if (f.isDirectory()) {
                fileList.addAll(listFiles(f.getAbsolutePath()));
            } else {
                fileList.add(f.getAbsoluteFile());
            }
        }

        return fileList;
    }

    /**
     * Take an object listing from AWS and turns it into a map with filename:metadata key pairs.
     * Note that everything in S3 is referred to as an "object" which has 1) content and 2) various metadata about it.
     * The metadata is stored in the S3ObjectSummary class.
     *
     * @param ol object listing returned by AmazonS3.listObjects
     * @return map from S3 key (absolute file path) to its object summary
     */
    public static HashMap<String, S3ObjectSummary> listS3Keys(ObjectListing ol) {
        List<S3ObjectSummary> olist = ol.getObjectSummaries();
        HashMap<String, S3ObjectSummary> map = new HashMap<String, S3ObjectSummary>(olist.size());

        for (S3ObjectSummary o : olist) {
            map.put(o.getKey(), o);
        }

        return map;
    }

    /**
     * Computes the hex-encoded MD5 checksum of a file's contents.
     *
     * @param f file to digest
     * @return lowercase hex MD5 of the file's bytes
     * @throws IOException if the file cannot be read
     */
    public static String calculateMD5(File f) throws IOException {
        // Close the stream ourselves: DigestUtils.md5Hex reads the stream but
        // does not close it, and this method is called inside loops -- leaking
        // a handle per file would eventually exhaust file descriptors.
        FileInputStream in = new FileInputStream(f);
        try {
            return DigestUtils.md5Hex(in);
        } finally {
            in.close();
        }
    }

    /**
     * Reads an InputStream fully into a newline-joined String.
     * This method was used primarily for debugging--to check for the content of our InputStream.
     * http://eureka.ykyuen.info/2010/09/19/java-convert-inputstream-to-string/
     *
     * @param is stream to read; always closed before returning
     * @return the stream's content decoded as UTF-8, one "\n" per line
     * @throws IOException if closing the stream fails
     */
    public static String convertStream(InputStream is) throws IOException {
        StringBuilder sb = new StringBuilder();
        String line;

        try {
            BufferedReader reader = new BufferedReader(new InputStreamReader(is, "UTF-8"));
            while ((line = reader.readLine()) != null) {
                sb.append(line).append("\n");
            }

        } catch (Exception e) {
            // Best-effort debug helper: report and return whatever was read.
            e.printStackTrace();
        } finally {
            is.close();
        }
        return sb.toString();
    }

    /**
     * Creates a temporary file with text data to demonstrate uploading a file
     * to Amazon S3
     *
     * @return A newly created temporary file with text data.
     *
     * @throws java.io.IOException
     */
    private static File createSampleFile() throws IOException {
        File file = File.createTempFile("aws-java-sdk-", ".txt");
        file.deleteOnExit();

        // Close the writer even if a write fails, so the temp file handle is
        // not leaked.
        Writer writer = new OutputStreamWriter(new FileOutputStream(file));
        try {
            writer.write("abcdefghijklmnopqrstuvwxyz\n");
            writer.write("01234567890112345678901234\n");
            writer.write("!@#$%^&*()-=[]{};':',.<>/?\n");
            writer.write("01234567890112345678901234\n");
            writer.write("abcdefghijklmnopqrstuvwxyz\n");
        } finally {
            writer.close();
        }

        return file;
    }

}