import tkinter as tk
from concurrent.futures import ThreadPoolExecutor
import re
import csv
import requests
import logging

# Module-level logger: INFO and above go to log.txt (UTF-8) with timestamps.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler("log.txt", encoding='utf-8')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

window = tk.Tk()  # Main application window
window.title('视频下载:https://www.xvideos.com/')  # Window title
window.geometry('400x175')  # Window size (width x height)
# Visibility flags toggled by the two checkboxes below.
row3_dow = False
row6_out = False

# Row 0: crawl-URL entry plus the two feature checkboxes.
row0 = tk.Frame()
row0.pack(fill="x")
tk.Label(row0, text='爬取地址').pack(side=tk.LEFT)  # label: "crawl address"
crawling = tk.StringVar()
tk.Entry(row0, textvariable=crawling).pack(side=tk.LEFT)

# Checkboxes: "执行下载" (run download) shows rows 1/2/4 via dow();
# "保存CSV" (save CSV) shows row 6 via out().
tk.Checkbutton(row0, text='执行下载', command=lambda: dow()).pack(side=tk.RIGHT)
tk.Checkbutton(row0, text='保存CSV', command=lambda: out()).pack(side=tk.RIGHT)

# Rows 1, 2 and 4 are created but deliberately NOT packed here;
# dow() packs/unpacks them on demand.
row1 = tk.Frame()
tk.Label(row1, text='服务地址').pack(side=tk.LEFT)  # label: aria2 RPC service URL
service = tk.StringVar()
tk.Entry(row1, textvariable=service).pack(side=tk.LEFT)

row2 = tk.Frame()
tk.Label(row2, text='保存地址').pack(side=tk.LEFT)  # label: save directory
save = tk.StringVar()
tk.Entry(row2, textvariable=save).pack(side=tk.LEFT)

row4 = tk.Frame()
tk.Label(row4, text='密　　钥').pack(side=tk.LEFT)  # label: aria2 secret token
key = tk.StringVar()
tk.Entry(row4, textvariable=key).pack(side=tk.LEFT)

# Row 5: proxy entry and the start button (always visible).
row5 = tk.Frame()
row5.pack(fill="x")
tk.Label(row5, text='代　　理').pack(side=tk.LEFT)  # label: proxy host:port
proxy = tk.StringVar()
tk.Entry(row5, textvariable=proxy).pack(side=tk.LEFT)
# "开始执行" = start; snapshots all entry values and kicks off the scrape.
tk.Button(row5, text="开始执行", command=lambda: setup_start(crawling.get(), service.get(
), save.get(), key.get(), proxy.get(), output.get())).pack(side=tk.RIGHT)

# Row 6: CSV output path; packed/unpacked by out().
row6 = tk.Frame()
tk.Label(row6, text='输出路径').pack(side=tk.LEFT)  # label: CSV output file path
output = tk.StringVar()
tk.Entry(row6, textvariable=output).pack(side=tk.LEFT)


def dow():
    """Toggle visibility of the download-settings rows (service, save dir, key)."""
    global row3_dow
    row3_dow = not row3_dow
    # Pack order matters: row1, row2, row4 appear in that sequence.
    for frame in (row1, row2, row4):
        if row3_dow:
            frame.pack(fill="x")
        else:
            frame.pack_forget()


def out():
    """Show the CSV output-path row on one call, hide it on the next."""
    global row6_out
    row6_out = not row6_out
    if not row6_out:
        row6.pack_forget()
        return
    row6.pack(fill="x")


def setup_start(crawling, service, save, key, proxy, output):
    """Fetch the listing page, collect video-page links, dispatch worker threads.

    crawling: listing-page URL to scrape.
    proxy: "host:port" of an HTTP proxy, or empty for a direct connection.
    service/save/key/output: accepted for interface compatibility; the
        workers currently read the GUI StringVars directly instead.
    """
    # Only build a proxy mapping when one was supplied: the previous code
    # produced invalid bare "http://" proxy URLs for an empty proxy field.
    proxies = None
    if proxy.strip():
        # The proxy itself is normally addressed over HTTP for both target
        # schemes; requests routes https traffic through it via CONNECT.
        proxies = {
            'http': 'http://' + proxy,
            'https': 'http://' + proxy,
        }
    response = requests.get(crawling, timeout=10, proxies=proxies)

    # Keep only anchors that carry a title attribute and point at a /video path.
    anchors = [
        m.group() for m in re.finditer(r"<a[^>]*>([^<]*)</a>", response.text)
        if 'title=' in m.group() and '/video' in m.group()
    ]

    # Extract each href and make it absolute.
    ia = []
    for a in anchors:
        for b in re.finditer(r'(?<=<a href=\").*?(?=\" title=)', a):
            ia.append("https://www.xvideos.com" + b.group(0))

    # Fan detail-page fetches out to worker threads. Do not wait for them
    # here: blocking would freeze the Tk main loop. shutdown(wait=False)
    # lets already-submitted tasks finish while releasing the executor.
    executor = ThreadPoolExecutor(max_workers=10)
    for item in ia:
        executor.submit(getUrl, item, proxies)
    executor.shutdown(wait=False)

def getUrl(url, proxies):
    """Worker: fetch one video page, extract title and stream URL, then hand
    off to aria2 and/or CSV output depending on the filled-in GUI fields.

    Runs on a ThreadPoolExecutor thread; exceptions are logged explicitly
    because un-awaited futures would otherwise swallow them silently.
    """
    try:
        response = requests.get(url, timeout=5, proxies=proxies)
        # Guard every findall with an emptiness check: the original indexed
        # [0] unconditionally and raised IndexError on pages without a match.
        titles = re.findall(
            '(?<=html5player.setVideoTitle\\(\').*?(?=\'\\);)', response.text)
        if not titles:
            logger.warning("no title found on %s", url)
            return
        name = titles[0]
        # Prefer the high-quality stream, fall back to the low-quality one.
        streams = re.findall(
            '(?<=html5player.setVideoUrlHigh\\(\').*?(?=\'\\);)', response.text)
        if not streams:
            streams = re.findall(
                '(?<=html5player.setVideoUrlLow\\(\').*?(?=\'\\);)',
                response.text)
        if not streams:
            logger.warning("no stream URL found on %s", url)
            return
        video_url = streams[0]
        logger.info("%s|%s", name, video_url)
        # NOTE(review): reading Tk StringVars from a worker thread is not
        # guaranteed thread-safe — consider snapshotting them in setup_start.
        if (service.get().strip() != '' and save.get().strip() != ''
                and key.get().strip() != '' and video_url.strip() != ''):
            aria2(service.get(), video_url, key.get(), name, save.get())
            print("开始下载:" + name)
        if output.get().strip() != '':
            # Bug fix: the original passed the StringVar object itself as the
            # path, which open() cannot use; pass its string value instead.
            getData_start(name, video_url, output.get())
    except Exception:
        logger.exception("failed to process %s", url)

def getData_start(name, url, path):
    """Append one ``name,url`` row to the CSV file at *path* (UTF-8, created
    if missing)."""
    with open(path, 'a+', newline='', encoding='utf-8') as csv_file:
        csv.writer(csv_file).writerow([name, url])


def aria2(dowPath, dowUrl, key, name, path):
    """Queue *dowUrl* for download on an aria2 JSON-RPC endpoint.

    dowPath: aria2 RPC URL (e.g. "http://host:6800/jsonrpc").
    dowUrl:  real URL of the file to download.
    key:     aria2 RPC secret token.
    name:    output file name on the aria2 host.
    path:    output directory on the aria2 host.
    Returns the HTTP status code of the RPC call.
    """
    # Build the aria2.addUri request in one literal. The original assembled
    # it piecewise through an alias (reqdata = jsondata) and kept four dead
    # local variables (dlurl/fn/dn and the mutated jsondata) along the way.
    payload = {
        "jsonrpc": "2.0",
        # Arbitrary request id required by JSON-RPC; aria2 echoes it back.
        "id": "QXJpYU5nXzE1NDgzODg5MzhfMC4xMTYyODI2OTExMzMxMzczOA==",
        "method": "aria2.addUri",
        # params: [secret token, [uris], per-download options]
        "params": ['token:' + key, [dowUrl], {"out": name, "dir": path}],
    }
    # Timeout added so a dead RPC endpoint cannot hang the worker thread.
    ret = requests.post(dowPath, json=payload, timeout=10)
    return ret.status_code


window.mainloop()  # Blocks until the window is closed; all GUI callbacks run here.
