# -*- coding: utf-8 -*-

#    Stingingnettlepy - A static website generator written in Python
#    Copyright (C) 2013  Jannik Haberbosch
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.


# Import of modules from the Python Standard Library.
import errno
import stat
import os
import hashlib
import os.path
import shutil
import json
import logging
import posixpath


def prwalk( sftp, path ):
	"""os.walk() equivalent for a paramiko SFTPClient.

	Recursively walks the remote directory tree rooted at 'path',
	yielding one (path, dirs, files) triple per directory, top-down.
	Symbolic links are skipped entirely (neither followed nor listed).
	Unlike os.walk(), no keyword arguments such as 'topdown' are accepted.

	Args:
		sftp: An instance of the class 'paramiko.SFTPClient'.
		path: An absolute path to the base directory on a remote Linux server.

	Yields:
		Tuples (path, dirs, files), where 'path' is the absolute path of the
		directory currently visited, 'dirs' holds the sub-directory names
		found in it and 'files' holds the regular file names found in it.
	"""
	dirs = []
	files = []

	# Classify every entry of the current directory.  lstat() (not stat())
	# is used so a symbolic link is seen as a link, not as its target.
	for entry in sftp.listdir( path ):
		mode = sftp.lstat( posixpath.join( path, entry ) ).st_mode
		if stat.S_ISDIR( mode ):
			dirs.append( entry )
		elif not stat.S_ISLNK( mode ):
			# Neither a directory nor a symlink: treat it as a regular file.
			files.append( entry )

	# Report the current directory before descending (top-down order).
	yield path, dirs, files

	# Recurse into each sub-directory and re-yield its triples.
	for entry in dirs:
		for triple in prwalk( sftp, posixpath.join( path, entry ) ):
			yield triple


def prexists( sftp, path ):
	"""os.path.exists() for paramiko's SFTP object.

	Args:
		sftp: An instance of the 'paramiko.SFTPClient' class.
		path: The full path to a directory on a remote Linux server.

	Returns:
		A boolean, whose value is 'True' if the absolute path 'path' exists on the remote Linux server.
		Otherwise, the value of the boolean is 'False', indicating that path 'path' does not exist
		on the remote Linux server.

	Raises:
		IOError: For any SFTP failure other than a missing path
			(e.g. permission denied).
	"""
	try:
		sftp.stat( path )
	# 'except E as e' replaces the Python-2-only 'except E, e' spelling;
	# it works on Python 2.6+ and Python 3.
	except IOError as e:
		if e.errno == errno.ENOENT:
			# Only ENOENT means "the path does not exist".
			return False
		# Any other I/O failure must not be mistaken for "exists";
		# previously it was silently swallowed and True was returned.
		raise
	return True


def prisdir( sftp, path ):
	"""os.path.isdir() for paramiko's SFTP object.

	Args:
		sftp: An instance of the 'paramiko.SFTPClient' class.
		path: A full path on a remote Linux server that should be pointing to a directory.

	Returns:
		True if 'path' refers to a directory on the remote server,
		False otherwise.
	"""
	# The S_ISDIR bit test on the remote stat() result decides the outcome.
	return bool( stat.S_ISDIR( sftp.stat( path ).st_mode ) )


def prisfile( sftp, path ):
	"""os.path.isfile() for paramiko's SFTP object.

	Args:
		sftp: An instance of the 'paramiko.SFTPClient' class.
		path: A full path on a remote Linux server that should be pointing to a file.

	Returns:
		True if 'path' refers to a regular file on the remote server,
		False otherwise.
	"""
	# The S_ISREG bit test on the remote stat() result decides the outcome.
	return bool( stat.S_ISREG( sftp.stat( path ).st_mode ) )


def paramiko_dirtree_hash_dict( sftp, path_to_root_dir ):
	"""Builds a dictionary of sha256 checksums for a remote directory tree.

	The tree rooted at 'path_to_root_dir' is walked with prwalk().  For
	every directory a checksum of its name is stored; for every file a
	checksum of its content, which is read in binary mode in blocks of
	BLOCKSIZE bytes so that even very large files can be hashed safely.

	The keys of the result are the paths relative to 'path_to_root_dir'
	with forward slashes replaced by backslashes; the values are the
	sha256 hex digests.

	The result is used to compare two directory trees with each other in
	order to detect which directories and files have been changed,
	deleted or added.

	Args:
		sftp: An instance of the 'paramiko.SFTPClient' class.
		path_to_root_dir: A string that represents an absolute path on a remote Linux server.

	Returns:
		A dictionary with the two sub-dictionaries 'dirs' and 'files',
		each mapping a relative, backslash-separated path to a checksum.

		For example:
			{
				"dirs": { "\\sub": "<hexdigest>" },
				"files": { "\\a.txt": "<hexdigest>", "\\sub\\b.txt": "<hexdigest>" }
			}
	"""
	# A file is read in blocks to protect against very large files.
	BLOCKSIZE = 65536

	# Holds the path:checksum pairs of all directories and files.
	dirtree_hashes = { "dirs": {}, "files": {} }

	# Walking the remote directory tree.
	for dirpath, dirnames, filenames in prwalk( sftp, path_to_root_dir ):
		for d in dirnames:
			ho = hashlib.sha256()
			# Directories have no readable content, so the checksum is
			# derived from the directory name.  The name is encoded when
			# necessary because sha256 requires bytes on Python 3.
			ho.update( d if isinstance( d, bytes ) else d.encode( "utf-8" ) )
			# Store only the path relative to the root, backslash-separated.
			the_path = posixpath.join( dirpath, d ).partition( path_to_root_dir )[2].replace( "/", "\\" )
			dirtree_hashes["dirs"][the_path] = ho.hexdigest()

		for f in filenames:
			ho = hashlib.sha256()
			# Read the remote file in binary mode, block by block.
			with sftp.open( posixpath.join( dirpath, f ), 'rb' ) as file_handle:
				buf = file_handle.read( BLOCKSIZE )
				while len( buf ) > 0:
					ho.update( buf )
					buf = file_handle.read( BLOCKSIZE )
			# Fixed: the digest is stored exactly once, after the whole file
			# has been read.  The original stored it on every loop iteration
			# and never stored anything at all for empty files.
			the_path = posixpath.join( dirpath, f ).partition( path_to_root_dir )[2].replace( "/", "\\" )
			dirtree_hashes["files"][the_path] = ho.hexdigest()

	return dirtree_hashes


def dirtree_hash_dict( path_to_root_dir ):
	"""Builds a dictionary of sha256 checksums for a local directory tree.

	The tree rooted at 'path_to_root_dir' is walked with os.walk().  For
	every directory a checksum of its name is stored; for every file a
	checksum of its content, which is read in binary mode in blocks of
	BLOCKSIZE bytes so that even very large files can be hashed safely.

	The keys of the result are the paths relative to 'path_to_root_dir';
	the values are the sha256 hex digests.

	The result is used to compare two directory trees with each other in
	order to detect which directories and files have been changed,
	deleted or added.

	Args:
		path_to_root_dir: A string that represents an absolute path on the local system.

	Returns:
		A dictionary with the two sub-dictionaries 'dirs' and 'files',
		each mapping a relative path to a checksum.

		For example:
			{
				"dirs": { "/sub": "<hexdigest>" },
				"files": { "/a.txt": "<hexdigest>", "/sub/b.txt": "<hexdigest>" }
			}
	"""
	# A file is read in blocks to protect against very large files.
	BLOCKSIZE = 65536

	# Holds the path:checksum pairs of all directories and files.
	dirtree_hashes = { "dirs": {}, "files": {} }

	# Walking the local directory tree.
	for dirpath, dirnames, filenames in os.walk( path_to_root_dir ):
		for d in dirnames:
			ho = hashlib.sha256()
			# Directories have no readable content, so the checksum is
			# derived from the directory name.  The name is encoded when
			# necessary because sha256 requires bytes on Python 3.
			ho.update( d if isinstance( d, bytes ) else d.encode( "utf-8" ) )
			dirtree_hashes["dirs"][os.path.join( dirpath, d ).partition( path_to_root_dir )[2]] = ho.hexdigest()

		for f in filenames:
			ho = hashlib.sha256()
			# 'with' guarantees the file handle is closed even on errors.
			with open( os.path.join( dirpath, f ), 'rb' ) as file_handle:
				buf = file_handle.read( BLOCKSIZE )
				while len( buf ) > 0:
					ho.update( buf )
					buf = file_handle.read( BLOCKSIZE )
			# Fixed: the digest is stored exactly once, after the whole file
			# has been read.  The original stored it on every loop iteration
			# and never stored anything at all for empty files.
			dirtree_hashes["files"][os.path.join( dirpath, f ).partition( path_to_root_dir )[2]] = ho.hexdigest()

	return dirtree_hashes


def compare_directories_depth( path1, path2 ):
	"""Compares the depth of two relative directory paths.

	Depth is measured as the number of occurrences of 'os.sep' in each
	path string.

	Args:
		path1: A relative path to a directory.
		path2: A relative path to a directory.

	Returns:
		-1, if 'path1' points to a directory deeper than 'path2'.
		0, if both point to directories of the same depth.
		1, if 'path2' points to a directory deeper than 'path1'.
	"""
	# Positive difference: path1 is deeper; negative: path2 is deeper.
	depth_difference = path1.count( os.sep ) - path2.count( os.sep )

	if depth_difference > 0:
		return -1
	if depth_difference < 0:
		return 1
	return 0


def read_file( filepath ):
	"""Returns the content of a file 'filepath' as a string.

	Args:
		filepath: The absolute path to a file.

	Returns:
		The entire content of the file as a string.
	"""
	# A context manager guarantees the handle is closed even if read()
	# raises; the original leaked the handle in that case.
	with open( filepath, "r" ) as file_handle:
		return file_handle.read()


def dir_is_empty( path ):
	"""Returns 'True' if a directory is empty; otherwise 'False'.

	Args:
		path: The absolute path to a directory.
	"""
	# An empty listing means the directory holds neither files nor
	# sub-directories.
	return not os.listdir( path )


def truncate_file( filepath ):
	"""Truncates a file to the size 0.

	Args:
		filepath: The absolute path to a file.
	"""
	# Opening in "w" mode clears the file; nothing has to be written.
	# The context manager closes the handle immediately afterwards.
	with open( filepath, "w" ):
		pass


def write_file( filepath, content="" ):
	"""Writes 'content' to a file 'filepath', encoded as UTF-8.

	Opens the file, writes the UTF-8 encoding of 'content' to it and
	closes it again.  An existing file is overwritten.

	Args:
		filepath: The absolute path to the file.
		content: The string to write to the file 'filepath'.
	"""
	# Binary mode is required because encoded bytes are written: a text
	# mode handle rejects bytes on Python 3 and newline translation could
	# corrupt the encoded data on Windows.  'with' closes the handle even
	# if write() raises.
	with open( filepath, "wb" ) as file_handle:
		file_handle.write( content.encode( "utf-8" ) )


def force_copytree( src_path, dst_path ):
	"""Copies the directory tree 'src_path' to 'dst_path', replacing any existing destination.

	If the destination directory already exists, it is deleted first, so
	that 'dst_path' ends up as an exact copy of the source tree.

	Args:
		src_path: The absolute path of the directory tree that is copied.
		dst_path: The absolute path the tree is copied to; removed beforehand if it exists.
	"""
	# shutil.copytree() refuses to write into an existing directory, so a
	# previous destination tree has to be removed first.
	if os.path.exists( dst_path ):
		shutil.rmtree( dst_path )

	shutil.copytree( src_path, dst_path )


def load_json( filepath ):
	"""Reads JSON from a file 'filepath' and returns the deserialized Python 'Dict'.

	The whole file content is read as text via read_file() and then
	deserialized with the 'json' module of the Python standard library.

	Args:
		filepath: The absolute filepath to the file to open.

	Returns:
		The deserialized JSON document, typically a Python dictionary.
	"""
	return json.loads( read_file( filepath ) )


def directories_exist( dirs ):
	"""Returns 'True' if every directory listed in 'dirs' exists.  Otherwise 'False'.

	Each entry is resolved relative to the current working directory.
	Every missing directory is logged as an error before the result is
	returned, so a single call reports all missing directories at once.

	Args:
		dirs: Contains one or more relative paths to directories.

	Returns:
		A boolean: 'True' if all directories in 'dirs' exist, 'False' if
		at least one of them is missing.
	"""
	all_found = True
	cwd = os.getcwd()

	for directory in dirs:
		if not os.path.exists( os.path.join( cwd, directory ) ):
			# Keep checking so that every missing directory is reported.
			logging.error( "The directory '%s' does not exist." % directory )
			all_found = False

	return all_found


def files_exist( files ):
	"""Returns 'True' if every file listed in 'files' exists.  Otherwise 'False'.

	Each entry is resolved relative to the current working directory.
	Every missing file is logged as an error before the result is
	returned, so a single call reports all missing files at once.

	Args:
		files: Contains one or more relative paths to files.

	Returns:
		A boolean: 'True' if all files in 'files' exist, 'False' if at
		least one of them is missing.
	"""
	all_found = True
	cwd = os.getcwd()

	for f in files:
		if not os.path.exists( os.path.join( cwd, f ) ):
			# Keep checking so that every missing file is reported.
			logging.error( "The file '%s' does not exist." % f )
			all_found = False

	return all_found
