#===============================================================================
# Id :cutescan.y
# Author:Yaseng
#===============================================================================
import   sys, urllib2, time, os , Queue, msvcrt, threading,re
from json import scanner
 

 
def cslogo():
    print '''
     ,--^----------,--------,-----,-------^--,
     | |||||||||   `--------'     |          O ..
     `+---------------------------^----------|
       `\_,-------, ___@ PlayWeb   ______|
         / XXXXXX /`|     /
        / XXXXXX /  `\   /
       / XXXXXX /\______(
      / XXXXXX /        
     / XXXXXX /
    (________(    For example:
     `------'         cutescan.py  yaseng.me 
    
    ''' 
 
# show message
def msg(text, type=0):
    if type == 0: 
       str_def = "[*]" 
    elif  type == 1: 
       str_def = "[+]"
    else:
       str_def = "[-]";
    print str_def + text;
    
# readfile to array
def file_to_arr(file):
    arr=[]
    f = open(file)
    for line in f:
      arr.append(line.strip())
    return arr    
 

class ThreadGetKey(threading.Thread):
    def run(self):
        try:           
            chr = msvcrt.getch()
            if chr == 'q':
                print "stopped by your action ( q )"
                os._exit(1)
        except:
            os._exit(1) 

def cutescan(host):
    msg("Cutescan :" + host) 
    path_list1 = file_to_arr("data\\path1.txt")
    path_list2 = file_to_arr("data\\path2.txt")
    suffix_list= file_to_arr("data\\suffix.txt")
    # Compile the  dictionary
    path_list1+=compile_domain(host)
    global file_queue
    global path_list
    global url_list
    global g_error_page_size
    path_list = []
    url_list=[]
    file_queue = Queue.Queue()
    g_error_page_size=error_page_szie("http://"+host)
    for path2 in path_list2 :
          if len(path2) > 0:
             path_list.append(path2)
          for path1 in path_list1 :
           path_list.append(path1 + path2 );
    #print path_list
    scandir("http://" + host+"/")
    url_list.insert(0,"http://" + host+"/")
    for url in url_list:
      for path in path_list :
        for suffix in suffix_list :
            file_queue.put(url + path+"." + suffix) 
    msg("Target:%s %d available path found  %d url list  compiled " % (host,len(url_list),file_queue.qsize()),1)
    for i in range(30):
          Scaner().start() 
     
def  scandir(curl):
    for path in path_list :
      url = curl  + path + "/"
      #msg(url)
      try:
          r = urllib2.urlopen(url, timeout=10)
          size = dict(r.headers).get('content-length', 0)
          if g_error_page_size != size :
           msg("Path:" + url + "   --->%d" % r.getcode(), 1); 
           url_list.append(url)
           #scandir(url+"/")  # Recursive  fuzz path 
          else :
              continue
      except urllib2.HTTPError as hr:
            # msg("Path:"+url+"   --->%d" % hr.code,2);
            if(hr.code == 403) :
                url_list.append(url)
                scandir(url+"/")
            continue     

# compile domain to  path_list   exp : pentest.yaseng.com.cn => ['pentest.yaseng.com.cn', 'yaseng.com.cn', 'pentest', 'yaseng']
def  compile_domain(domain):
  path_list = []
  url_re = re.compile('.(com|net|org|cc|gov|edu|cn|me|info|hk|tv|asia).*')  # top domain ....
  rq=url_re.search(domain);
  if rq  is None :
      return []
  ret = url_re.sub('', domain)
  domain_arr = ret.split('.')
  path_list.append(domain)
  domain_len=len(domain_arr)
  if domain_len > 0 :
    if domain_len > 1 :
      path_list.append(domain_arr[domain_len-1]+rq.group())
    path_list+=domain_arr
  return path_list

def error_page_szie(domain):
   try:
    ret=urllib2.urlopen(domain+"/cutescan9527.html", timeout=10)
   except :
       return 0
   return  dict(ret.headers).get('content-length', 0)

class Scaner(threading.Thread):
    def __init__(self):
      threading.Thread.__init__(self)
    def run(self):
      while 1:
         if file_queue.empty() == True:
             break
         url =  str(file_queue.get())
         rfile=""
         
         try:
          #msg(url)
          rfile = urllib2.urlopen(url, timeout=10)
          size = dict(rfile.headers).get('content-length', 0)
          if g_error_page_size != size :
           msg("File %s found !!! Type:%s Size: %.4f m" % (url, rfile.headers['Content-Type'], float(size) / (1024 * 1024)) , 1);
          else :
              continue
         except :
            continue
         
         
if __name__ == '__main__':
    
   cslogo()
   if len(sys.argv) > 1 :
     site_arr=file_to_arr(sys.argv[1]);
   for site in site_arr :
       try :
           urllib2.urlopen("http://"+site, timeout=10)
           cutescan(site)
       except :
       	   msg(site+" look unavailable ")
           continue   
         
         
         
         
         
 
