package edu.harvard.s3xyback;

import com.amazonaws.auth.PropertiesCredentials;
import com.amazonaws.services.s3.AmazonS3EncryptionClient;
import com.amazonaws.services.s3.model.*;
import org.apache.commons.codec.digest.DigestUtils;
import org.joda.time.Days;
import org.joda.time.LocalDate;

import javax.crypto.SecretKey;
import java.io.*;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.logging.FileHandler;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.logging.SimpleFormatter;

// Entry point is main() below; TODO: accept the config file location as a command-line argument.

/**
 * Backs up configured local directories to Amazon S3 using client-side
 * encryption, and can restore deleted files, remove duplicated objects, and
 * periodically verify object integrity via MD5 checksums.
 *
 * <p>Configuration is read from {@code my_properties.properties} on the
 * classpath and the AES key from a local {@code aws_key} file. Object keys are
 * the absolute paths of the backed-up files; each object carries user metadata
 * with an MD5 {@code checksum} and a {@code lastchecksumdate} timestamp.
 */
public class S3xyBack {

    // How frequently should reports be generated
    enum ReportSchedule {DAILY, WEEKLY, MONTHLY}

    private Logger logger;               // application log, written to MyLogFile.log
    private String configVersion;        // reserved for config-format version checks (currently unused)
    private String[] dir;                // directories to back up ("directories" config, comma-separated)
    private int checkDays;               // re-verify a checksum after this many days
    private ReportSchedule reportSched;  // report frequency (report generation not yet implemented)
    private boolean autoRestore;         // automatically restore locally-deleted files?
    private SecretKey key;               // AES key for client-side encryption
    private AmazonS3EncryptionClient s3; // encrypting S3 client
    private String bucketName;           // configured bucket base name / directory prefix
    private String bucket;               // actual S3 bucket name (persisted in the .aws_bucket file)
    private ArrayList<String> map;       // object keys currently present in the bucket
    private List<File> localFiles;       // every file found under the configured directories

    public S3xyBack() throws Exception {
        initialize();
    }

    public Logger getLogger() {
        return logger;
    }

    public String[] getDir() {
        return dir;
    }

    public String getBucket() {
        return bucket;
    }

    public String getBucketName() {
        // BUG FIX: previously returned 'bucket' (the generated S3 bucket name)
        // instead of the configured 'bucketName'.
        return bucketName;
    }

    public AmazonS3EncryptionClient getS3() {
        return s3;
    }

    public ArrayList<String> getMap() {
        return map;
    }

    public static void main(String[] args) throws Exception {
        // TODO: accept the location of the config file as args[0]
        S3xyBack s3xy = new S3xyBack();
    }

    /**
     * Sets up logging, loads configuration and the AES key, connects to S3,
     * resolves (or creates) the backup bucket, and builds the in-memory views
     * of remote keys ({@link #map}) and local files ({@link #localFiles}).
     *
     * @throws FileNotFoundException if the config file or AES key file is missing
     * @throws Exception if the S3 connection or bucket setup fails
     */
    public void initialize() throws Exception {
        // TODO: create an AWS account for new users upon installation and send them their credentials

        // Configure the logger with a file handler and a plain-text formatter.
        this.logger = Logger.getLogger("MyLog");
        try {
            FileHandler fh = new FileHandler("MyLogFile.log", true);
            logger.addHandler(fh);
            logger.setLevel(Level.ALL);
            fh.setFormatter(new SimpleFormatter());
        } catch (SecurityException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }

        // Load the configuration file from the classpath, failing with a clear
        // message if absent (the original dereferenced a possibly-null stream).
        Properties configFile = new Properties();
        InputStream configStream = S3xyBack.class.getResourceAsStream("my_properties.properties");
        if (configStream == null) {
            throw new FileNotFoundException("my_properties.properties not found on classpath");
        }
        try {
            configFile.load(configStream);
        } finally {
            configStream.close();  // the original never closed this stream
        }

        // TODO: Check the version of the config file format to detect incompatible upgrades

        // Read parameters from the config file:
        this.dir = configFile.getProperty("directories").split(",");
        this.checkDays = Integer.parseInt(configFile.getProperty("checkDays"));
        this.reportSched = ReportSchedule.valueOf(configFile.getProperty("reportSched"));
        this.autoRestore = Boolean.parseBoolean(configFile.getProperty("autoRestore"));
        this.bucketName = configFile.getProperty("bucketName");
        this.localFiles = new ArrayList<File>();

        // Load the AES encryption key; without it we can neither read nor write objects.
        // TODO: allow user to configure another password
        File keyFile = new File("aws_key");
        if (keyFile.exists()) {
            this.key = AESKeyFactory.readKey(keyFile);
        } else {
            // Narrowed from raw Exception; still caught by callers catching Exception.
            throw new FileNotFoundException("Could not find AES key: " + keyFile.getAbsolutePath());
        }

        // connect to AWS
        // TODO (future): support multi-tenancy scenario with dual-encrypted envelope keys
        // AWS provides encryption with its API (using our keys), however, multiple keys were not supported last time I
        // looked. This prevents us from decrypting all users' files in the multi-tenancy scenario. If we did the encryption
        // ourselves, we could create multiple "envelope keys" so that multiple parties could decrypt the same data
        // without sharing keys.
        s3 = new AmazonS3EncryptionClient(new PropertiesCredentials(new File("my_properties.properties")),
                new EncryptionMaterials(key));
        System.out.println("Listing buckets");
        for (Bucket b : s3.listBuckets()) {  // renamed loop var: it shadowed the 'bucket' field
            System.out.println(" - " + b.getName());
        }
        System.out.println();

        // AWS storage is organized into flat buckets (not directories). We generate a random string for
        // each new directory and store it in that directory's .aws_bucket file.
        // Note: the directory's base name is prepended so the bucket is recognizable in the AWS console.
        String awsBucketFile = ".aws_bucket";

        // Check for an existing bucket mapping for the configured directory; if this
        // directory is new and no bucket has been created yet, create one.
        File bucketFile = new File(bucketName + awsBucketFile);
        System.out.println("bucketFile: " + bucketFile);
        if (bucketFile.exists()) {
            System.out.println("bucket exists!");
            BufferedReader in = new BufferedReader(new FileReader(bucketFile));
            try {
                bucket = in.readLine();
            } finally {
                in.close();  // close even if readLine() throws (the original leaked here)
            }

            if (!s3.doesBucketExist(bucket)) {
                // The bucket was deleted from Amazon for some reason.
                System.out.println(bucket + " does not exist");
                // TODO: since a user may delete a bucket from Amazon, handle this more
                // gracefully — perhaps prompt the user to re-create the bucket?
            }
        } else {
            System.out.println("could not find bucket");
            // S3 bucket names must be globally unique and lowercase alphanumeric,
            // so combine the sanitized directory name with a random UUID suffix.
            String uuid = UUID.randomUUID().toString().replaceAll("[^A-Za-z0-9]", "").toLowerCase();
            String baseName = new File(bucketName).getName().replaceAll("[^A-Za-z0-9]", "").toLowerCase();

            bucket = baseName + uuid;

            // Persist the new name in the .aws_bucket file for that directory.
            org.apache.commons.io.FileUtils.writeStringToFile(bucketFile, bucket);

            // Create the new bucket on Amazon S3.
            System.out.println("Creating a bucket: " + bucket);
            s3.createBucket(bucket);
            // BUG FIX: 'dir' is an array; log its contents, not its identity hash.
            logger.info(Arrays.toString(dir) + ": " + bucket);
        }

        // Build the list of object keys currently stored in the bucket.
        map = listS3Keys(s3.listObjects(bucket));

        // Collect every local file path under each configured directory.
        for (String d : dir) {
            localFiles.addAll(listFiles(d));
        }
        // uploadFiles();
    }

    /**
     * Uploads every local file that has no corresponding object in the bucket.
     */
    public void uploadFiles() throws Exception {
        for (File f : localFiles) {
            // A file with no S3 entry gets uploaded with a fresh checksum and timestamp.
            if (!map.contains(f.getAbsolutePath())) {
                upload(f);
            }
        }
    }

    /**
     * Uploads a single file to S3 under its absolute path, storing its MD5
     * checksum and the current time as user metadata, then records the new
     * key in {@link #map}.
     *
     * @param f a regular (non-directory) file to upload
     * @throws IllegalArgumentException if {@code f} is not a regular file
     */
    public void upload(File f) throws Exception {
        if (!f.isFile()) {
            // BUG FIX: the old message ("must be of type File") did not describe the check.
            throw new IllegalArgumentException("Argument must be a regular file: " + f);
        }
        PutObjectRequest putRequest = new PutObjectRequest(bucket, f.getAbsolutePath(), f);
        putRequest.setMetadata(freshMetadata(f));

        // Store the file in the bucket; only record the key once the PUT succeeded
        // (the original added to 'map' before uploading, so a failed PUT left a stale entry).
        s3.putObject(putRequest);
        map.add(f.getAbsolutePath());
        System.out.println("Uploaded new object: " + f + " to the bucket " + bucket);
    }

    /**
     * Restores objects whose local copies have been deleted, when auto-restore
     * is enabled in the configuration.
     *
     * TODO: Recreate the directory if it has been deleted
     * Currently only works if the directory still exists
     */
    public void autoRestore() throws Exception {

        // Refresh the list of keys by traversing the bucket.
        map = listS3Keys(s3.listObjects(bucket));

        for (String k : map) {
            // Only keys whose local file is gone are candidates for restore.
            if (localFiles.contains(new File(k))) {
                continue;
            }
            if (autoRestore) {
                File file = new File(k);
                // BUG FIX: getParentFile() can be null for root-level keys;
                // the original 'new File(file.getParent())' would NPE there.
                File parentDirectory = file.getParentFile();
                if (parentDirectory != null && !parentDirectory.exists()) {
                    System.out.println("The indicated path does not exist!");
                    if (parentDirectory.mkdirs()) {
                        logger.info("Directory " + parentDirectory.getAbsolutePath() + " created");
                    }
                }
                // Fetch the object and write it back to the original path, closing
                // both streams even on failure (the original leaked both).
                InputStream input = s3.getObject(bucket, k).getObjectContent();
                try {
                    FileOutputStream output = new FileOutputStream(file);
                    try {
                        org.apache.commons.io.IOUtils.copy(input, output);
                    } finally {
                        output.close();
                    }
                } finally {
                    input.close();
                }
                logger.info("File restored to: " + k);
            }
            // TODO: Generate and email a report for files that have been moved, renamed, or deleted.
            //       The frequency of the reports should be configurable using reportSched.
        }
    }

    /**
     * Deletes objects in the cloud whose content is duplicated elsewhere on the
     * local filesystem (same MD5 checksum).
     */
    public void deleteDuplicates() throws Exception {

        // BUG FIX: the original called map.remove(k) inside a for-each over 'map',
        // which throws ConcurrentModificationException; use an explicit iterator.
        Iterator<String> keys = map.iterator();
        while (keys.hasNext()) {
            String k = keys.next();

            // Retrieve the stored checksum for this object.
            Map<String, String> userMetadata = s3.getObjectMetadata(bucket, k).getUserMetadata();
            String checksum = userMetadata.get("checksum");

            // Look for a local file with the same MD5 checksum.
            for (File f : localFiles) {
                if (calculateMD5(f).equals(checksum)) {
                    // Delete the object from the bucket and the key list if a duplicate is found.
                    if (map.contains(f.toString())) {
                        s3.deleteObject(bucket, k);
                        keys.remove();
                    }
                    // TODO: Store in moved/renamed report
                    logger.info(k + " has been removed. A duplicate was found at " + f.getAbsolutePath());
                    // BUG FIX: stop scanning once handled; the original could try to
                    // delete the same key again on a second matching local file.
                    break;
                }
            }

            // The file has been deleted on the user's FS but will be maintained in S3.
            // TODO: Let the user know that the file has been deleted by storing in report
            System.out.println("Deletion: " + k);
        }
    }

    /**
     * Verifies objects whose checksum has not been validated within the last
     * {@code checkDays} days: pushes a new version if the local file is newer,
     * refreshes the check date if the checksums still match, and fails if the
     * content diverged without a newer local modification time (corruption).
     *
     * @throws IOException if a stored object is detected as corrupted
     */
    public void checkCorruption() throws Exception {

        // This is the format produced by java.util.Date.toString(), which is what
        // freshMetadata() stores; hoisted out of the loop (it is method-local, so
        // SimpleDateFormat's lack of thread safety is not a concern here).
        SimpleDateFormat sdf = new SimpleDateFormat("EEE MMM dd HH:mm:ss zzz yyyy");

        for (String k : map) {
            Map<String, String> userMetadata = s3.getObjectMetadata(bucket, k).getUserMetadata();
            Date lastChecked = sdf.parse(userMetadata.get("lastchecksumdate"));

            // Days elapsed since the last checksum verification.
            int daysSinceLastCheck = Days.daysBetween(
                    LocalDate.fromDateFields(lastChecked),
                    LocalDate.fromDateFields(Calendar.getInstance().getTime())).getDays();
            if (daysSinceLastCheck <= checkDays) {
                continue;  // checked recently enough
            }

            // BUG FIX: indexOf() returns -1 when there is no local copy; the original
            // then crashed in get() with IndexOutOfBoundsException.
            int localIndex = localFiles.indexOf(new File(k));
            if (localIndex < 0) {
                continue;  // no local copy to compare against (autoRestore() handles restores)
            }
            File localFile = localFiles.get(localIndex);

            String localMD5 = calculateMD5(localFile);
            String cloudMD5 = userMetadata.get("checksum");

            if (!localMD5.equals(cloudMD5)) {
                // Checksums differ: decide between "user edited the file" and "corruption"
                // by comparing modification times.
                long localDate = localFile.lastModified();
                long cloudDate = s3.getObjectMetadata(bucket, k).getLastModified().getTime();

                if (localDate > cloudDate) {
                    System.out.println("New file version detected... updating file to S3");
                    PutObjectRequest putRequest = new PutObjectRequest(bucket, k, localFile);
                    putRequest.setMetadata(freshMetadata(localFile));
                    s3.putObject(putRequest);
                    logger.info("Successfully updated: " + k + " in bucket " + bucket);
                    // TODO: Generate a report indicating that the file has been modified.
                } else {
                    // BUG FIX: added the missing space in the message; narrowed the raw
                    // Exception to IOException (still compatible with callers).
                    throw new IOException(k + " is corrupted");
                    // TODO: Generate some kind of error report; ask user to replace local file with backup file
                }
            } else {
                // Checksums match: refresh 'lastchecksumdate'. S3 metadata cannot be
                // updated in place, so issue a self-copy with the new metadata.
                CopyObjectRequest copyObjReq = new CopyObjectRequest(bucket, k, bucket, k);
                copyObjReq.setNewObjectMetadata(freshMetadata(localFile));
                s3.copyObject(copyObjReq);
            }
        }
    }

    // Builds ObjectMetadata carrying the file's current MD5 checksum and "now"
    // as the last-check timestamp (the format parsed by checkCorruption()).
    private static ObjectMetadata freshMetadata(File f) throws IOException {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.addUserMetadata("checksum", calculateMD5(f));
        objectMetadata.addUserMetadata("lastchecksumdate", Calendar.getInstance().getTime().toString());
        return objectMetadata;
    }

    /**
     * Recursively lists all regular files under the given directory path.
     */
    public static List<File> listFiles(String path) {
        return listFiles(new File(path));
    }

    /**
     * Recursively lists all regular files under the given directory, as
     * absolute File objects. Returns an empty list if the path does not exist
     * or is not a directory (the original threw a NullPointerException then).
     */
    public static List<File> listFiles(File path) {
        File[] entries = path.listFiles();
        if (entries == null) {
            // listFiles() returns null for nonexistent paths and non-directories.
            return new ArrayList<File>();
        }
        List<File> fileList = new ArrayList<File>(entries.length);
        for (File f : entries) {
            if (f.isDirectory()) {
                fileList.addAll(listFiles(f.getAbsolutePath()));
            } else {
                fileList.add(f.getAbsoluteFile());
            }
        }
        return fileList;
    }

    /**
     * Extracts the object keys from an S3 object listing.
     * NOTE(review): ObjectListing is paginated; only the first page of keys is
     * returned here — TODO handle truncated listings for buckets with many objects.
     */
    public static ArrayList<String> listS3Keys(ObjectListing ol) {
        List<S3ObjectSummary> summaries = ol.getObjectSummaries();
        ArrayList<String> keys = new ArrayList<String>(summaries.size());
        for (S3ObjectSummary o : summaries) {
            keys.add(o.getKey());
        }
        return keys;
    }

    /**
     * Computes the hex-encoded MD5 digest of a file's contents.
     * (MD5 is used for change detection here, not for security.)
     *
     * @throws IOException if the file cannot be read
     */
    public static String calculateMD5(File f) throws IOException {
        // BUG FIX: the stream was previously never closed (file-handle leak).
        InputStream in = new FileInputStream(f);
        try {
            return DigestUtils.md5Hex(in);
        } finally {
            in.close();
        }
    }

    /**
     * Creates a temporary file with text data to demonstrate uploading a file
     * to Amazon S3.
     *
     * @return A newly created temporary file with text data.
     *
     * @throws java.io.IOException
     */
    private static File createSampleFile() throws IOException {
        File file = File.createTempFile("aws-java-sdk-", ".txt");
        file.deleteOnExit();

        Writer writer = new OutputStreamWriter(new FileOutputStream(file));
        try {
            writer.write("abcdefghijklmnopqrstuvwxyz\n");
            writer.write("01234567890112345678901234\n");
            writer.write("!@#$%^&*()-=[]{};':',.<>/?\n");
            writer.write("01234567890112345678901234\n");
            writer.write("abcdefghijklmnopqrstuvwxyz\n");
        } finally {
            writer.close();  // close even if a write throws (the original leaked here)
        }

        return file;
    }

}