<?php
/**
*
*===================================================================
*
*  phpBB Code Repository -- Extension File
*-------------------------------------------------------------------
*	Script info:
* Version:		2.0.0 - "Xerxes" - Alpha 1
* SVN ID:		$Id: extension_crawldir.php 3 2009-07-31 02:25:51Z sTraTosphere.programming $ 
* Copyright:	(c) 2008, 2009 | Obsidian
* License:		http://opensource.org/licenses/gpl-2.0.php | GNU Public License v2
* Package:		Includes
*
*===================================================================
*
*/

/**
* @ignore
*/
// Deny direct access: IN_PHPBB is defined by phpBB's common.php bootstrap,
// so hitting this file directly in a browser simply terminates.
if (!defined('IN_PHPBB'))
{
    exit;
}

/**
 * Projects System - Crawldir Extension class,
 * 		Crawldir and caching framework extension for phpBB Code Repository 2.0.x "Xerxes" 
 * 
 * @author Obsidian
 * @copyright (c) 2008, 2009 | Obsidian
 * @license http://opensource.org/licenses/gpl-2.0.php | GNU Public License v2
 */
class projects_extension_crawldir extends projects_extension_core
{

	/**
	 * Constructor method for class projects_extension_crawldir
	 */
	public function __construct() { }

	/**
	* Recursively retrieves the contents of a directory (like scandir), including
	* the contents of every subdirectory found (unlike scandir).
	*
	* Only let it crawl the directory as necessary; it may cause heavy server
	* strain if used constantly.  Entries named '.', '..' and '.svn', as well as
	* symlinks, are always skipped.
	*
	* @param string $path - Filepath to run crawldir on
	* @param array $ignore_dirs - Directories to skip entirely (their contents are skipped too).
	*				Each entry must be the full path relative to the running script,
	*				i.e. prefixed with $path -- see build_ignore_list().
	* @param array $ignore_files - Files to skip; same path rules as $ignore_dirs.
	*
	* @return mixed - On success, an array with keys 'dirs' (flat list of all directories,
	*				root first as '/'), 'files' and 'subdirs' (both grouped by the id of the
	*				directory they were found in; id 0 is the root).  Returns the constant
	*				DIR_INVALID if $path is missing/unreadable/not a directory/a symlink,
	*				or NO_FILES_FOUND if the crawl turned up nothing.
	*/
	public function crawldir($path, array $ignore_dirs = array(), array $ignore_files = array())
	{
		// Reject missing paths, symlinks, non-directories and unreadable directories outright.
		if (!@file_exists($path) || @is_link($path) || !@is_dir($path) || !@is_readable($path))
		{
			return DIR_INVALID;
		}

		$return = array();
		$return['dirs'][] = '/';
		// Snapshot of the "nothing found yet" state, for the empty-result check below.
		$default = $return;
		$homepath = $path;

		// Breadth-first crawl: $queue holds absolute paths of directories still to be
		// scanned, seeded with the root.  New directories are appended as they are
		// discovered, so the queue index of a directory doubles as its id -- files and
		// subdirs found inside queue entry $i are recorded under id $i (root = 0).
		$queue = array($homepath);
		for ($i = 0; $i < sizeof($queue); $i++)
		{
			$scan = @scandir($queue[$i]);
			if ($scan === false)
			{
				// Directory vanished or became unreadable mid-crawl; skip it.
				continue;
			}
			foreach ($scan as $item)
			{
				$filepath = $queue[$i] . '/' . $item;
				// Skip self/parent entries, SVN metadata, and symlinks.
				if ($item == '.' || $item == '..' || $item == '.svn' || @is_link($filepath))
				{
					continue;
				}
				// Path relative to the crawl root, always beginning with '/'.
				$relpath = substr($filepath, strlen($homepath));
				if (@is_dir($filepath))
				{
					if (in_array($filepath, $ignore_dirs))
					{
						continue;
					}
					$queue[] = $filepath;
					$return['dirs'][] = $relpath;
					$return['subdirs'][$i][] = $relpath;
				}
				else
				{
					if (in_array($filepath, $ignore_files))
					{
						continue;
					}
					$return['files'][$i][] = $relpath;
				}
			}
		}

		return ($return !== $default) ? $return : NO_FILES_FOUND;
	}

	/**
	* Obtains a cached version of the Project's filelist, or crawls and caches a
	* fresh one if no cached set is present.
	*
	* @param array &$dirs - List of directories found
	* @param array &$files - List of files found
	* @param array &$subdirs - List of subdirs, used for ID matching
	* @param integer $cache_days - How long to store the filelist data for, in days
	*/
	public function load_filelist(array &$dirs, array &$files, array &$subdirs, $cache_days = 5)
	{
		global $cache;

		$cache_key = '_proj_' . projects::$project_data['project_tag'];
		if (($filelist = $cache->get($cache_key)) === false)
		{
			$filelist = $this->get_filelist();
			// $cache_days is in days; phpBB's cache wants a lifetime in seconds.
			$cache->put($cache_key, $filelist, 86400 * (int) $cache_days);
		}

		$dirs = isset($filelist['dirs']) ? $filelist['dirs'] : false;
		$files = isset($filelist['files']) ? $filelist['files'] : false;
		$subdirs = isset($filelist['subdirs']) ? $filelist['subdirs'] : false;
	}

	/**
	* Obtains the Project's filelist.  Ignores caching entirely!
	*
	* @param array &$dirs - List of directories found
	* @param array &$files - List of files found
	* @param array &$subdirs - List of subdirs, used for ID matching
	*/
	public function filelist(array &$dirs, array &$files, array &$subdirs)
	{
		$filelist = $this->get_filelist();

		$dirs = isset($filelist['dirs']) ? $filelist['dirs'] : false;
		$files = isset($filelist['files']) ? $filelist['files'] : false;
		$subdirs = isset($filelist['subdirs']) ? $filelist['subdirs'] : false;
	}

	/**
	* Builds the ignore lists, crawls the current Project's source directory, and
	* raises a board error if the directory is invalid or empty.  Shared guts of
	* load_filelist() and filelist().
	*
	* @access protected
	* @return array - Filelist array as returned by crawldir()
	*/
	protected function get_filelist()
	{
		global $user;

		$source_path = projects::$projects_path . '/' . projects::$project_data['project_tag'];

		// Seed lists of names to ignore -- change these at your own leisure.  :P
		// build_ignore_list() prefixes each entry with $source_path in place.
		$ignore_files = array('.htaccess');
		$this->build_ignore_list($ignore_files, $source_path);
		$ignore_dirs = array('cgi_bin');
		$this->build_ignore_list($ignore_dirs, $source_path);

		$filelist = $this->crawldir($source_path, $ignore_dirs, $ignore_files);
		if ($filelist === DIR_INVALID)
		{
			projects::error($user->lang['SOURCE_DIR_DOA']);
		}
		if ($filelist === NO_FILES_FOUND)
		{
			projects::error($user->lang['NO_FILES_FOUND_IN_DIR']);
		}

		return $filelist;
	}

	/**
	* Constructs a list of files/directories to ignore by prepending the source
	* path to each entry, if you're too lazy to do it yourself...
	*
	* The array is modified in place, so pass a variable (never a literal -- PHP
	* cannot pass literals by reference).
	*
	* @param array &$ignored_items - Referenced var, used to build the ignore list
	* @param string $source_path - The source path to prepend.  Duh.
	*/
	public function build_ignore_list(array &$ignored_items, $source_path)
	{
		foreach ($ignored_items as $key => $ignored_item)
		{
			$ignored_items[$key] = $source_path . '/' . $ignored_item;
		}
	}

}
