#export PATH=$PATH:/home/viuviu/gecko
import requests  
from lxml import html  
import urlparse  
import collections

from BeautifulSoup import BeautifulSoup
from selenium import webdriver
import pandas as pd
import openpyxl as op
import re
import urllib
from urlparse import urljoin
from urlparse import urlparse
import time
import sys

# Echo and store the crawl's starting URL, taken from the first CLI argument.
print str(sys.argv[1])
parent_url = str(sys.argv[1])

# Shared crawl state, mutated by the functions below via `global`:
#   writed_links  - links discovered on the current level, flushed to <level>.txt
#   buffer_links  - links read back from the previous level's file (as .split() lists)
#   visited_links - URLs already fetched (plus pre-seeded blacklist entries below)
#   all_links     - declared but never populated in this file
#   level         - declared but never updated in this file
writed_links = list()
buffer_links = list ()
visited_links = list()
all_links = list()
level = 0

def stopped_link(url):
    """Return True when *url* must not be crawled, else False.

    A link is skipped when it contains a fragment marker ('#') or uses a
    non-navigable scheme ("javascript:", "mailto:"). "STOP" is printed for
    each skipped link, matching the original logging behavior.

    Bug fixed: the original's final ``return False`` was tab-indented so it
    sat unreachable inside the last ``if`` branch; clean URLs implicitly
    returned None. Callers test ``!= True`` so None happened to work, but
    the function now returns an explicit bool in every case.
    """
    # print(...) with a single argument behaves identically under Python 2.
    for marker in ('#', 'javascript:', 'mailto:'):
        if marker in url:
            print("STOP")
            return True
    return False
	    

def file_len(filename):
    """Return the number of lines in *filename*.

    Bug fixed: the original returned ``i + 1`` where ``i`` was only bound by
    the enumerate loop, so an empty file raised UnboundLocalError. An empty
    file now correctly yields 0.
    """
    count = 0
    with open(filename) as f:
        # enumerate from 1 so `count` ends up equal to the line total.
        for count, _ in enumerate(f, 1):
            pass
    return count

def write_links(filename):
    """Flush the global ``writed_links`` buffer to ``<filename>.txt``.

    Each link is UTF-8 encoded and appended on its own line; links that
    cannot be encoded are skipped (best-effort, as in the original). The
    buffer is cleared afterwards.

    Fixes: no longer shadows the ``file`` builtin, drops the redundant
    ``close()`` after the ``with`` block (the file is already closed), and
    narrows the bare ``except`` to encoding errors so real I/O failures are
    no longer silently swallowed.
    """
    global writed_links
    path = str(filename) + '.txt'
    with open(path, 'a') as out:
        for link in writed_links:
            try:
                out.write(str(link).encode('utf-8').strip())
                out.write('\n')
            except UnicodeError:
                # Skip links whose bytes cannot be round-tripped; the
                # original deliberately ignored these.
                pass
    del writed_links[:]
    
def read_links(filename):
    """Load every line of ``<filename>.txt`` into the global buffer.

    Each line is stored as its ``str.split()`` token list (NOT as a plain
    string) -- callers re-join with ``''.join(...)``. Returns the number of
    lines read.
    """
    global buffer_links
    path = str(filename) + '.txt'
    with open(path, 'r') as source:
        entries = [line.split() for line in source]
    buffer_links.extend(entries)
    return len(entries)

def log_to_file(text):
    """Append *text* (stringified) as one line to the shared crawl log ``all.res``.

    Fixes: no longer shadows the ``file`` builtin, and drops the redundant
    ``close()`` after the ``with`` block and the bare ``return``.
    """
    with open("all.res", 'a') as log:
        log.write(str(text))
        log.write('\n')

def get_current_domain(url):
    """Return the ``scheme://netloc/`` prefix of *url* (e.g. ``http://host/``)."""
    parts = urlparse(url)
    return '%s://%s/' % (parts.scheme, parts.netloc)
    
def tags_to_file(inputs,url,filename):
    """Append a record to ``<filename>.res``: the URL, one line per tag in
    *inputs*, then a dashed separator line.

    Fixes: the original opened the same file in append mode three separate
    times (and shadowed the ``file`` builtin); one ``with`` block produces
    byte-identical output.
    """
    path = str(filename) + '.res'
    with open(path, 'a') as out:
        out.write(str(url))
        out.write('\n')
        for tag in inputs:
            out.write(str(tag))
            out.write('\n')
        out.write("-------------------------------------------------")
        out.write('\n')
    
def nextLevel(current_level,depth,driver):
    """Crawl one BFS level: read links from ``<current_level>.txt``, visit each
    same-domain, not-yet-visited URL with *driver*, collect its <a href> links
    into ``writed_links`` and its <input> tags into ``inputs.res``, then flush
    the collected links to ``<current_level + 1>.txt``.

    NOTE(review): ``depth`` is accepted but never used inside this function.
    """
    global A
    global level
    global writed_links
    global visited_links
    global all_links
    global parent_url
    print "------------------------------------------------------------"
    # buffer_links is filled (as .split() token lists) by read_links().
    level_count = read_links(current_level)
    print "!!! Current level = ",current_level
    print "Level count = ",level_count
    for url in buffer_links:
         # str(url) here is the repr of the token list; '#'/'javascript:'/
         # 'mailto:' substring checks still work on that representation.
         if stopped_link(str(url)) != True:
            # Re-join the token list into a single URL string.
            url = ''.join(url)
            print url
            domain_url = get_current_domain(url)
            domain_parent_url = urlparse(str(parent_url))
            print "current domain ",domain_url
            print "parrent domain ",domain_parent_url.netloc
            # Only follow unvisited links whose domain contains the parent's netloc.
            if (str(url) not in visited_links) and (str(domain_parent_url.netloc) in str(domain_url)):
                print "GOTO >>> ",str(url)
                log_to_file("GOTO >>>")
                log_to_file(url)
                try:
                    driver.get(url)
                
                    visited_links.append(str(url))
                
                    html = driver.page_source.encode('utf-8')
                    soup = BeautifulSoup(html)
                    tags = soup('a')
                
                    # Record every <input> found on the page for later analysis.
                    inputs = soup('input')
                    tags_to_file(inputs,driver.current_url,"inputs")
                
                    try:
                        for a in tags:
                            a = a.get('href',None)
                            current_domain = (driver.current_url)
                            # Resolve relative hrefs against the page actually loaded.
                            k = urljoin(current_domain,a)
                            if k not in writed_links:
                                writed_links.append(k)
                                print k
                                log_to_file(k)
                    except KeyError:
		                print "KeyError"
                # NOTE(review): bare except deliberately swallows any driver /
                # parsing failure so one bad page cannot abort the level.
                except:
                    pass
    next_level = current_level + 1
    print "!!! Next level = ",next_level    
    # Persist this level's discoveries as the next level's input file,
    # then clear the read buffer for the next call.
    write_links(next_level)
    del buffer_links[:]    


def getLinks(url,driver,depth):
    """Entry point of the crawl: fetch the seed *url*, write its links to
    ``0.txt``, then run ``nextLevel`` for levels 0..depth-1 and finally shut
    the *driver* down.
    """
    global A
    global level
    global writed_links
    global buffer_links
    global visited_links
    global all_links
    global parent_url
    if str(url) not in visited_links:
        driver.get(url)
        # Generous wait so JS-heavy seed pages finish rendering.
        time.sleep(12)
        visited_links.append(url)
        print "GET PAGE CONTENT..." 
        log_to_file("GET PAGE CONTENT...")  
        
        html = driver.page_source.encode('utf-8')
        soup = BeautifulSoup(html)
        
        tags = soup('a')
        
        # Record the seed page's <input> tags alongside every other page's.
        inputs = soup('input')
        tags_to_file(inputs,driver.current_url,"inputs")
        for a in tags:
            a = a.get('href',None)
            current_domain = (driver.current_url)
            # Resolve relative hrefs against the page actually loaded.
            k = urljoin(current_domain,a)
            print k
            if k not in visited_links:
                writed_links.append(k)
                log_to_file(k)
    
    # Seed file for level 0, consumed by the first nextLevel() call.
    write_links("0")
    z=0
    
    while z < depth:
        nextLevel(z,depth,driver)
        z += 1
        time.sleep(1)
    
    driver.close()
    driver.quit() 

def init_browser():
    """Launch and return a Chrome webdriver configured for crawling.

    Uses a fixed user profile, disables XSS auditing / web security / image
    loading, and sets a 30s page-load timeout.

    Bug fixed: the original created TWO Chrome instances -- an unconfigured
    one via ``webdriver.Chrome('/usr/bin/chromedriver')`` that was
    immediately overwritten (leaking a browser process) and a second one
    without the explicit chromedriver path. One call now passes both the
    driver path and the options.
    """
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--user-data-dir=/home/viuviu/DorksScan/crawly/Profile')
    #chrome_options.add_argument('--proxy-server=socks5://127.0.0.1:9050')
    chrome_options.add_argument('--disable-xss-auditor')
    chrome_options.add_argument('--disable-web-security')
    chrome_options.add_argument('--enable-devtools-experiments')
    chrome_options.add_argument('--disable-images')
    chrome_options.add_argument('--disable-background-mode')
    browser = webdriver.Chrome('/usr/bin/chromedriver', chrome_options=chrome_options)
    browser.set_page_load_timeout(30)
    return browser

       
#if __name__ == "__main__":

# Pre-seed visited_links with URLs the crawler must never fetch:
# logout endpoints, a known download, and javascript: pseudo-links in the
# capitalization variants seen in the wild.
visited_links.append("https://download.cdn.yandex.net/element/firefox/homeset/ru/homeset.xpi")
visited_links.append("https://www.lyst.com/account/logout/")

#visited_links.append("/?edit=1#open=_topnews")
visited_links.append("javascript:;")
#visited_links.append("mailto:info@soulzoneholistics.co.uk")
visited_links.append("javascript:void();")    
visited_links.append("javascript:void(0);")
visited_links.append("javascript:void(0)") 
visited_links.append("javaScript:void(0);")
visited_links.append("https://kywha.harvestapp.com/account/logout")
  

#fp = webdriver.FirefoxProfile('/home/viuviu/.mozilla/firefox/j3vpvwy4.test2bb')
#fp = webdriver.FirefoxProfile('/home/viuviu/.mozilla/firefox/dw3jgtcy.for-tor')


# Launch Chrome and start a depth-5 crawl from the CLI-supplied seed URL.
driver = init_browser()
#driver = webdriver.Firefox(fp)
#driver = webdriver.PhantomJS()
getLinks(parent_url,driver,5)

    
