from scapy.all import *
import threading, time, re, os
from selenium import webdriver
import shutil
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
import datetime
from selenium.webdriver.common.keys import Keys
import SSHConnection
import random
## ========================================================================== ##
## ------------------------ USER CONFIGURATION FLAGS ------------------------ ##
## ========================================================================== ##

### Local-machine settings
## Timing settings
access_pages=10               # number of capture rounds per page
local_catch_traffic_time=31   # local traffic-capture duration (seconds)
sniff_to_open=5               # delay between starting the sniffer and opening the page
page_duration=30              # how long each page stays open (seconds)
shutdown_wait=5               # wait after closing the page (seconds)

# Random 1-based index (2..11) of the "related video" link followed next round
rand=random.randint(2,11)

## Other settings
# Website list file (unused in this run)
# websites='websites_0823_all.txt'
# Local IP address (placeholder — fill in before running)
local_IP='XXX.XXX.XXX.XXX'
# Local capture interface name (placeholder — fill in before running)
local_iface='Intel(R) Wireless-AC XXXX XXXMHz'

# Tor server IP used in the capture filters (placeholder)
server_IP='XXX.XXX.XXX.XXX'

### Server-side settings
## Gateway
gateway_catch_traffic_time=32 # gateway capture duration per round (seconds)
gateway_dict={'host':'211.65.197.199','port':2222,'username':'work','pwd':'1qaz2wsx'}
#gateway_dict={'host':'127.0.0.1','port':9511,'username':'mason','pwd':'123456'}
gateway_path='/media/work/6ac23a2a-da54-4b0f-904f-b0ca33560d98/tor_video/exp0107'

# BPF filter: only TCP traffic to/from the Tor server
IPfilter = 'host ' + server_IP + ' and tcp'
def catch_traffic(*website):
    """Capture local traffic for one round and write it to a pcap file.

    Invoked as a thread target with args=(round_index, video_id); only
    website[1] (the video id) is used, as part of the pcap file name.

    Reads module globals: IPfilter, local_iface, local_catch_traffic_time,
    and foldname (the per-round output folder set by the main loop).
    """
    # Use the configuration flags declared at the top of the file instead of
    # re-hard-coding the interface name and timeout here, so the config stays
    # the single source of truth.
    packets = sniff(filter=IPfilter, iface=local_iface,
                    timeout=local_catch_traffic_time)
    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M_%S_')
    pcapname = foldname + '/' + stamp + website[1] + '.pcap'
    wrpcap(pcapname, packets)

# Remote-capture command sender (runs tcpdump on the gateway over SSH).
def SSH_cmd(ssh, duration, out_dir, sudo=False):
    """Start a time-limited, self-rotating tcpdump on a remote host.

    Backward-compatible with the old ``*args`` form: still callable with
    four positional arguments ``(ssh, duration, out_dir, sudo)``.

    ssh      -- SSHConnection-like object exposing cmd(command, sudo=...)
    duration -- capture length in seconds (also tcpdump's -G rotate period)
    out_dir  -- remote directory receiving the timestamped pcap files
    sudo     -- when True, run under sudo and filter on the Tor server_IP
    """
    if not sudo:
        cmd = ('timeout ' + str(duration) + ' tcpdump -s0 -G ' + str(duration)
               + ' -w ' + out_dir + '/%Y+%m%d_%H%M_%S.pcap')
        ssh.cmd(cmd)
    else:
        # Privileged capture restricted to traffic with the Tor server
        # (server_IP is a module-level configuration value).
        cmd = ('sudo timeout ' + str(duration) + ' tcpdump host ' + server_IP
               + ' -s0 -G ' + str(duration) + ' -w ' + out_dir
               + '/%Y+%m%d_%H%M_%S.pcap')
        ssh.cmd(cmd, sudo=True)

# Print scapy's view of the available capture interfaces so the operator
# can verify local_iface before the capture loop starts.
show_interfaces()

def create_driver():
    """Build a Firefox WebDriver routed through the local Tor SOCKS proxy.

    All browser caching is disabled so every visit generates a complete,
    fresh traffic trace, and .onion hostnames are resolved through the
    proxy rather than the local DNS.

    Returns: a selenium.webdriver.Firefox instance.
    """
    # Firefox profile object
    firefox_profile = FirefoxProfile()

    # Tor's SOCKS proxy listening on the local machine
    proxy = '127.0.0.1:9050'
    ip, port = proxy.split(":")
    port = int(port)

    settings = {
        # 1 = manual proxy configuration
        'network.proxy.type': 1,
        'network.proxy.socks': ip,
        'network.proxy.socks_port': port,
        # Disable the browser cache (memory and disk) and force revalidation
        'browser.cache.memory.enable': False,
        'browser.cache.disk.enable': False,
        'browser.cache.check_doc_frequency': 1,
        'network.http.use-cache': False,
        'browser.sessionhistory.max_total_viewers': 3,
        'network.dns.disableIPv6': True,
        # BUG FIX: Firefox preference names are case-sensitive; the original
        # set 'Content.notify.interval' (capital C), which Firefox ignores.
        'content.notify.interval': 750000,
        'content.notify.backoffcount': 3,
        # Allow .onion hostnames and resolve DNS through the SOCKS proxy
        'network.dns.blockDotOnion': False,
        'network.proxy.socks_remote_dns': True,
    }
    for key, value in settings.items():
        firefox_profile.set_preference(key, value)

    firefox_profile.update_preferences()
    return webdriver.Firefox(firefox_profile)

# Open the SSH control connection from this machine to the gateway.
gatewaySSH=SSHConnection.SSHConnection(gateway_dict)
gatewaySSH.connect()

# Seed page: the first onion "watch" URL. Each loop iteration replaces it
# with a randomly chosen related-video link harvested from the current page.
page="http://tuberyps2pn6dor6h47brof3w2asmauahhk4ei42krugybzzzo55klad.onion/watch?v=5Jdvx4YNmQM"

for i in range(2000):     # each iteration is one capture round
    # Extract the video id (group 3) from the current onion watch URL.
    pattern = r'(http://)(.*?)\.onion/watch\?v=(.*)'
    s = re.search(pattern, page)
    if s is None:
        # The harvested href was not a watch URL; restart from the seed page
        # instead of crashing on s.group(3).
        page = "http://tuberyps2pn6dor6h47brof3w2asmauahhk4ei42krugybzzzo55klad.onion/watch?v=5Jdvx4YNmQM"
        continue

    # Fresh local folder for this round's pcap (module global: read by
    # catch_traffic when it writes the capture file).
    foldname='./pcap/' + datetime.datetime.now().strftime('%Y%m%d_%H%M_%S_') + s.group(3)
    if os.path.exists(foldname):
        shutil.rmtree(foldname)
    os.mkdir(foldname)
    # Create the matching output folder on the gateway.
    gpage_path = gateway_path + '/' + datetime.datetime.now().strftime('%Y%m%d_%H%M_%S_') + s.group(3)
    gatewaySSH.cmd('sudo mkdir ' + gpage_path, sudo=True)

    gateway_thread = threading.Thread(target=SSH_cmd, args=(gatewaySSH, gateway_catch_traffic_time, gpage_path, True))
    catch_traffic_thread = threading.Thread(target=catch_traffic, args=(i, s.group(3)))

    # Open the page and start playback.
    driver = create_driver()
    driver.switch_to.window(driver.window_handles[0])
    try:
        driver.get(page)
        # Click the player to start the video (return value is None; the
        # original bound it to an unused variable).
        driver.find_element_by_xpath('//*[@id="player"]').click()
        # Harvest next round's page: the rand-th "related video" link.
        page = driver.find_element_by_xpath(
            '/html/body/div/div[2]/div[4]/div[3]/div/div[' + str(rand) + ']/div/a').get_attribute('href')

    except Exception:  # was a bare except: it also swallowed KeyboardInterrupt
        print(page + '出现异常，已重新加载.')
        page = "http://tuberyps2pn6dor6h47brof3w2asmauahhk4ei42krugybzzzo55klad.onion/watch?v=5Jdvx4YNmQM"
        # quit(), not close(): close() only closes the window and leaks one
        # geckodriver/Firefox process per failed round.
        driver.quit()
        continue

    else:
        # Start local and gateway traffic capture.
        gateway_thread.start()
        catch_traffic_thread.start()

        time.sleep(page_duration)
        driver.quit()

        # Join both capture threads before the next round: catch_traffic
        # reads the module-global foldname after its sniff timeout, so
        # overlapping rounds would race on it.
        catch_traffic_thread.join()
        gateway_thread.join()
        time.sleep(2)
