/*
 ============================================================================
 Name        : GZipIO.c
 Version     : 4.1
 Description : Coding sample for utilizing the HTTP Compression feature.

               The benefit of HTTP Compression is to reduce the network band-
               width required to transfer objects to HCP.  Not all transfers
               will benefit from this feature. Some content is already compressed
               and the cost to perform compression/decompression on the client
               and HCP may not justify the network bandwidth savings.

               This sample uses the libcurl open-source library to perform the
               HTTP communication to HCP.

 License     :
               Copyright (c) 2011 Hitachi Data Systems, Inc.

               Permission is hereby granted to  this software and associated
               documentation files (the "Software"), subject to the terms and
               conditions of the Sample Source Code License (SSCL) delivered
               with this Software. If you do not agree to the terms and
               conditions of the SSCL,

                 (i)  you must close this file and delete all copies of the
                      Software, and
                 (ii) any permission to use the Software is expressly denied.

 ============================================================================
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>

#include <curl/curl.h>

#include <openssl/evp.h>
#include <zlib.h>

#include "hcpUtils.h"

#define INPUT_FILE_NAME "compress.txt.toHCP"
#define OUTPUT_FILE_NAME "compress.txt.fromHCP"

#define HCP_PROTOCOL "https"
#define HCP_FILE_PATH "rest/examples/compress.txt"


// Per-transfer state handed to the libcurl read callback via CURLOPT_READDATA.
typedef struct {
	z_stream zlib_stream;  // zlib deflate stream state, initialized by deflateInit2()
	FILE *localFile;       // open handle for the local file being uploaded
} item_t;

// Chunk size of content read/write to local file
#define CHUNK_SIZE 16384

/**
 * This definition is used for the windowBits parameter of the ZLIB deflateInit2
 * and inflateInit2 functions. This parameter is explained as the base 2 of the
 * window size (the size of the history buffer). The range is 8-15 (or 256 to
 * 32768 bytes). This history buffer is used to make the most efficient
 * compression based on what has already been deflated.
 *
 * One trick to notice is that if 16 is added to this parameter, it means a gzip
 * header and trailer will be added/interpreted when the zlib operates on the
 * stream.
 *
 * The window size for the inflateInit2 must be equal to or greater than what was
 * used for the deflateInit2 that created the compressed file.
 **/
#define ZLIB_WINDOWBITS (15+16)


/**
 *  This call back function is called by CURL for an HTTP PUT request to
 *  obtain the message-body content to be read from somewhere. The content to
 *  be returned to CURL needs to be in gzip format, so after we read it from
 *  the file, we have to deflate it before passing it back to CURL for the HTTP
 *  PUT Request.
 */
size_t deflated_read_cb(void *ptr, size_t size, size_t nmemb, void *userdata) {

	unsigned char in[CHUNK_SIZE];  // This is our buffer for reading content to be compressed.

	item_t *pMyItem = (item_t *)userdata;

	// If EOF, we already read the whole file.  Nothing more to do.
	if (feof(pMyItem->localFile)) {
		return 0;
	}

	// Point the stream to what we need to fill up.
	pMyItem->zlib_stream.avail_out = size * nmemb;
	pMyItem->zlib_stream.next_out = ptr;

	/**
	 * Keep looping reading content and deflating until the output buffer is full.
	 *   Makes for better compression and less network traffic.
	 */
	int flush = Z_SYNC_FLUSH;
	do {
		// Read the next chunk size from the file into the stream.
		pMyItem->zlib_stream.next_in = in;
		pMyItem->zlib_stream.avail_in = fread(in, 1, CHUNK_SIZE, pMyItem->localFile);
		if (ferror(pMyItem->localFile))
		{
			fprintf(stderr, "Failed reading source file in deflated_read function");

			return CURL_READFUNC_ABORT;
		}

		// Determine what we need to tell the deflate function. If we are not going to get more content,
		//   then we need to do a FINISH.  Otherwise, we will get what we have.
		//   Humm... I wonder if the Z_SYNC_FLUSH is not as efficient?  Extra mem copies?
		flush = feof(pMyItem->localFile) ? Z_FINISH : Z_SYNC_FLUSH;

		int ret = deflate(&(pMyItem->zlib_stream), flush);
		if (Z_STREAM_ERROR == ret) {
			fprintf(stderr, "deflate function failed with code %d\n", ret);

			return CURL_READFUNC_ABORT;
		}

		// Let's be paranoid.  Should never happen (famous last words).
		if (0 != pMyItem->zlib_stream.avail_in) {
			// The library is supposed to be doing internal buffering so this shouldn't ever happen.
			fprintf(stderr, "Oops!!  After a call to deflate there is still stuff in the input array! This is not supposed to happen!!\n");

			return CURL_READFUNC_ABORT;
		}
	} while (pMyItem->zlib_stream.avail_out != 0 && Z_FINISH != flush );

	// All done with filling the buffer or ran out of content. Return what we have.
	return (size * nmemb) - pMyItem->zlib_stream.avail_out;
}

/**
 * This routine performs an HTTP PUT of an object using gzip compression.
 * The example operates on a non-compressed file and performs a buffered
 * compression to avoid consuming memory and/or disk resources.  Good for
 * very large files.
 */
/**
 * Performs an HTTP PUT of srcFilePath to the dstFilePath URL on HCP,
 * compressing the content on the fly via the deflated_read_cb callback.
 *
 * Returns EXIT_SUCCESS on a 2xx response, EXIT_FAILURE otherwise.  All
 * resources acquired here (file handle, zlib stream, header list) are
 * released on every path so the caller's curl session remains reusable.
 */
int do_write(CURL *curl, const char *srcFilePath, const char *dstFilePath, const char *inAuthHeader) {
	int retval = EXIT_SUCCESS;
	struct curl_slist *httpHeaderList = NULL;

	/***
	 *** Initialize the "item" structure to be passed to CURL callback functions
	 *** to perform the HTTP PUT operation.  This structure contains a pointer to
	 *** the source file and an initialized zlib stream.
	 ***/

	item_t myItem;
	memset(&myItem, 0, sizeof(item_t)); // clear out structure.

	// Open file to be compressed.
	myItem.localFile = fopen(srcFilePath, "rb");
	if(!myItem.localFile) {
		fprintf(stderr, "Unable to open file %s\n", srcFilePath);

		// Return failure (instead of _exit) so the caller can clean up the
		// curl session and global libcurl state.
		return EXIT_FAILURE;
	}

	/**
	 * Initialize the stream structure for deflate.
	 *
	 * The 4th parameter is the windowBits. It is explained as the base 2 of the
	 * window size (the size of the history buffer). The range is 8-15 (or 256 to
	 * 32768 bytes). This history buffer is used to make the most efficient
	 * compression based on what has already been deflated.  One trick to notice
	 * is that if 16 is added to this parameter, it means a gzip header and
	 * trailer will be added.
	 *
	 * Between the windowBits parameter and the 5th parameter, memLevel, will produce a
	 * trade-off between memory usage, compute time, and compression effectiveness.
	 */
	int err = deflateInit2(&myItem.zlib_stream, Z_DEFAULT_COMPRESSION, Z_DEFLATED, ZLIB_WINDOWBITS, 8, Z_DEFAULT_STRATEGY);
	if (Z_OK != err) {
		fprintf(stderr, "Failed to initialize deflate (zlib) structure\n");

		fclose(myItem.localFile);  // Don't leak the file handle on this path.
		return EXIT_FAILURE;
	}

	/***
	 *** Setup CURL reference for performing the HTTP PUT.
	 ***/
	curl_easy_reset(curl);

	// Bypass any SSL Key checks because we are not using site specific keys.
    //  Not needed if only doing HTTP
	curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);
	curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L);

	// Provide the response header call back function.  This is just for
	//  debugging/development purposes. It will cause the HTTP headers for
	//  the response to be output to stdout.
	curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, &headerProcessor);

	// Indicate this CURL operation is an HTTP PUT.
	curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);  // Note: CURLOPT_PUT is deprecated in 7.12.1

	// Set the URL to the destination file on HCP
	curl_easy_setopt(curl, CURLOPT_URL, dstFilePath);

	//
	// Setup the callback functions that should be used to read data from
	//  the source file. The source file and the initialized zlib instance
	//  is stored in the "myItem" structure.
	curl_easy_setopt(curl, CURLOPT_READFUNCTION, deflated_read_cb);
	curl_easy_setopt(curl, CURLOPT_READDATA, (void *)&myItem);

	//
	// Setup the header to indicate we are sending a gzip file and also include
	//   Authorization header.
	//
	// This example is a streaming example, so the size is not known ahead of time.
	// Therefore, we must send the content as "chunked" HTTP transfer encoding.
	//
	// However, if the file is already compressed on disk, the size can be determined
	// and could be indicated by sending a "Content-Length" HTTP header with the
	// size.

	httpHeaderList = curl_slist_append(httpHeaderList, "Transfer-Encoding: chunked");
	httpHeaderList = curl_slist_append(httpHeaderList, "Content-Encoding: gzip");

	// Use this HCP Authorization header to perform authentication.
	httpHeaderList = curl_slist_append(httpHeaderList, inAuthHeader);

	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, httpHeaderList);


	/***
	 *** Execute the HTTP PUT request.
	 ***/

	CURLcode curlRetVal = curl_easy_perform(curl);
	if(curlRetVal != CURLE_OK) {
		fprintf(stderr, "Error writing %s to HCP!\nReturn Code: %d\n", srcFilePath, curlRetVal);
		retval = EXIT_FAILURE;
		goto cleanup;
	}

	/***
	 *** Process the HTTP Response.
	 ***/

	long responseCode = 0;

	curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &responseCode);

	// Any code not in the 200 range signals a failure.
	if (2 != responseCode / 100) {
		fprintf(stderr, "ERROR: Unexpected response code from server (%d). Operation failed.\n", (int)responseCode);
		retval = EXIT_FAILURE;
	}


  cleanup:

    deflateEnd(&(myItem.zlib_stream));

    // Close the source file; it was open on every path that reaches here.
    fclose(myItem.localFile);

    /*
     *  Even though the curl handle has a pointer to it, we still own this list.
     */
    if (httpHeaderList) {
    	curl_slist_free_all(httpHeaderList);

    	// Clear out the HTTP Header pointer we setup just in case this curl connection
    	// is used for another follow up operation.
    	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, NULL);
    }

	return retval;
}

/**
 * This routine performs an HTTP GET of an object and requests gzip compression
 * over HTTP.
 *
 * Seems that libcurl has built in support for requesting gzip encoding, so the
 * implementation for do_read could have been easier than do_write.
 *
 * The following contains both ways of performing the do_read by either letting
 * CURL handle the inflation, or do it in the callback functions.  If the
 * USE_CURL_FOR_INFLATE is defined, it will activate the code to let libcurl do
 * the inflation of content.
 */
/**
 * Performs an HTTP GET of the srcFilePath URL on HCP, requesting gzip
 * content encoding over the wire, and writes the decompressed content to
 * dstFilePath.  libcurl performs the inflation (CURLOPT_ENCODING).
 *
 * Returns EXIT_SUCCESS on a 2xx response with the file fully flushed to
 * disk, EXIT_FAILURE otherwise.  On failure any partial output file is
 * removed.
 */
int do_read(CURL *curl, const char *srcFilePath, const char *dstFilePath, const char *inAuthHeader) {

	int retval = EXIT_SUCCESS;
	struct curl_slist *httpHeaderList=NULL;

	// Open the destination file that will be created by the GET.
	FILE *dstFile = fopen(dstFilePath, "wb");
	if (NULL == dstFile) {
		fprintf(stderr, "do_read(): Failed to open file (%s) for write.\n", dstFilePath);
		return EXIT_FAILURE;
	}

	/***
	 *** Initialize the CURL object for performing the GET request.
	 ***/

	// Reset curl to clear out prior usage.
	curl_easy_reset(curl);

	// Bypass any SSL Key checks because we are not using site specific keys.
    //  Not needed if only doing HTTP
	curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);
	curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L);

	// Provide the response header call back function.  This is just for
	//  debugging/development purposes. It will cause the HTTP headers for
	//  the response to be output to stdout.
	curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, &headerProcessor);

	// Indicate this CURL operation is an HTTP GET.
	curl_easy_setopt(curl, CURLOPT_HTTPGET, 1L);

	// Put decompressed content into this file. The libcurl implementation
	//   will handle all file I/O using stock stdio operations.
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, dstFile);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL); // Use the default function

	// Tell it to give us gzip encoding over the wire.  libcurl inflates the
	//   body before handing it to the write function.
	//   (CURLOPT_ENCODING is the pre-7.21.6 name of CURLOPT_ACCEPT_ENCODING.)
	curl_easy_setopt(curl, CURLOPT_ENCODING, "gzip");

	// Set the URL to the source object on HCP
	curl_easy_setopt(curl, CURLOPT_URL, srcFilePath);

	// Use this HCP Authorization header to perform authentication.
	httpHeaderList = curl_slist_append(httpHeaderList, inAuthHeader);
	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, httpHeaderList);

	/***
	 *** Execute the HTTP GET request.
	 ***/

	CURLcode curlRetVal = curl_easy_perform(curl);
	if(curlRetVal != CURLE_OK) {
		fprintf(stderr, "Error reading %s from HCP!\nReturn Code: %d\n", srcFilePath, curlRetVal);
		retval = EXIT_FAILURE;
		goto cleanup;
	}

	/***
	 *** Process the HTTP Response.
	 ***/

	long responseCode = 0;

	curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &responseCode);

	// Any code not in the 200 range signals a failure.
	if (2 != responseCode / 100) {
		fprintf(stderr, "ERROR: Unexpected response code from server (%d). Operation failed.\n", (int)responseCode);
		retval = EXIT_FAILURE;
	}

  cleanup:

    /* Close the destination File */
    if (NULL != dstFile) {
      // fclose() flushes buffered writes; a failure here means the content
      //   on disk may be incomplete, so it must be treated as a failure.
      if (0 != fclose(dstFile)) {
        fprintf(stderr, "do_read(): Failed to close file (%s).\n", dstFilePath);
        retval = EXIT_FAILURE;
      }

      if (EXIT_SUCCESS != retval)
        unlink(dstFilePath);  // Remove any residual file.
    }

    /*
     *  Even though the curl handle has a pointer to it, we still own this list.
     */
    if (httpHeaderList) {
    	curl_slist_free_all(httpHeaderList);

    	// Clear out the HTTP Header pointer we setup just in case this curl connection
    	// is used for another follow up operation.
    	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, NULL);
    }

	return retval;
}

/**
 * This program is the main line for an example on writing (PUT) and reading (GET)
 * GZIP compressed content with HCP.
 *
 * This is only supported in HCP 4.1 and newer.
 */
/**
 * Main line for an example on writing (PUT) and reading (GET) GZIP
 * compressed content with HCP.
 *
 * Usage: GZipIO <DNS-Namespace> <UserName> <Password>
 *
 * Exit status is EXIT_SUCCESS only when both the write and the read
 * operation succeed.  This is only supported in HCP 4.1 and newer.
 */
int main(int argc, char **argv) {
	int retval = EXIT_SUCCESS;

	/****
	 **** Make sure we have the required parameters.
	 ****/
	if (argc != 4) {
		char *basename = strrchr(argv[0], '/');
		if (NULL == basename)
			basename = argv[0];
		else
			basename++;

		fprintf(stderr, "\nUsage: %s <DNS-Namespace> <UserName> <Password>\n", basename);
		fprintf(stderr, "  where <DNS-Namespace> is the fully qualified DNS name of the HCP Namespace.\n");
		fprintf(stderr, "              For example: \"ns1.ten1.myhcp.example.com\"\n");
		fprintf(stderr, "        <Username> and <Password> are the credentials of the HCP user with data\n");
		fprintf(stderr, "              access permissions for the namespace.\n\n");

		exit(EXIT_FAILURE);
	}

	/****
	 **** Initialize the execution environment for HTTP over libcurl
	 ****/

	/* platform-specific setup */
	curl_global_init(CURL_GLOBAL_ALL);

	CURL *curl = NULL;       /* curl session handle */
	char *authHeader = NULL; /* HCP Authorization header value */

	/* Build the HCP file path: "<protocol>://<namespace>/<path>" */
	char *HCPFilePath = malloc( strlen(HCP_PROTOCOL) + 3
			+ strlen(argv[1]) + 1 + strlen(HCP_FILE_PATH) + 1 );
	if (NULL == HCPFilePath) {
		// Check malloc before sprintf; writing through NULL is undefined.
		fprintf(stderr, "Unable to allocate memory for the HCP file path\n");

		retval = EXIT_FAILURE;
		goto cleanup;
	}
	sprintf(HCPFilePath, "%s://%s/%s", HCP_PROTOCOL, argv[1], HCP_FILE_PATH);

	// Use this HCP security cookie to perform authentication.
	authHeader = createAuthHeader(argv[2], argv[3]);

	/*
	 * Create a curl session to use for the operations.
	 */
	curl = curl_easy_init();
	if(!curl) {
		fprintf(stderr, "Unable to initialize cURL HTTP library\n");

		retval = EXIT_FAILURE;
		goto cleanup;
	}

	/****
	 **** Perform a write (PUT) of GZIP compressed content on HCP.
	 ****/
	retval = do_write(curl, INPUT_FILE_NAME, HCPFilePath, authHeader);
	if (EXIT_SUCCESS != retval) {
		fprintf(stderr, "FAILURE: do_write() failed!!\n");

		retval = EXIT_FAILURE;
		goto cleanup;
	}

	/****
	 **** Perform a read (GET) of GZIP compressed content from HCP.
	 ****/
	retval = do_read(curl, HCPFilePath, OUTPUT_FILE_NAME, authHeader);
	if (EXIT_SUCCESS != retval) {
		fprintf(stderr, "FAILURE: do_read() failed!!\n");

		retval = EXIT_FAILURE;
		goto cleanup;
	}

	/****
	 **** SUCCESS!!  Just clean-up and get outta here.
	 ****/
cleanup:
	free(HCPFilePath);  // free(NULL) is a no-op; no guard needed.

	free(authHeader);

    /* free curl session */
	if(curl) {
		curl_easy_cleanup(curl);
		curl = NULL;
	}

	/* free platform bindings */
	curl_global_cleanup();

	// Report the actual outcome; the original returned EXIT_SUCCESS even
	// after a failed transfer, masking errors from callers/scripts.
	return retval;
}
