# selenium，手动下载

import time
from sys import path as path2

import requests
from bs4 import BeautifulSoup
from rich import print as rprint
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# 将 Service 的路径加入 python 搜索模块的列表
path2.append('c:/users/tian/desktop')

from Service import service, ua

def get_ip1(page=1):
    """Scrape one listing page of free proxies from kuaidaili.

    :param page: 1-based listing page number.
    :return: list of 'ip:port' strings, in page order.
    """
    r = requests.get(f'https://free.kuaidaili.com/free/inha/{page}/', headers={'User-Agent': ua.ua,})
    soup = BeautifulSoup(r.text, 'lxml')
    # IP and PORT live in separate <td> columns tagged with data-title;
    # pair them positionally (zip also guards against unequal column lengths).
    ip_cells = soup('td', {'data-title': 'IP'})
    port_cells = soup('td', {'data-title': 'PORT'})
    return [f'{ip.text}:{port.text}' for ip, port in zip(ip_cells, port_cells)]

def get_ip2(page=1):
    """Scrape one listing page of free proxies from kxdaili.

    :param page: 1-based listing page number.
    :return: list of 'ip:port' strings, in page order.
    """
    resp = requests.get(f'http://www.kxdaili.com/dailiip/1/{page}.html', headers={'User-Agent': ua.ua,})
    proxies = []
    # Each table row holds the host in column 0 and the port in column 1.
    for row in BeautifulSoup(resp.text, 'lxml').tbody('tr'):
        host, port = (cell.string for cell in row('td')[:2])
        proxies.append(f'{host}:{port}')
    return proxies

def req(url):
    """GET *url* and return its parsed BeautifulSoup tree.

    Retries forever on network failures (connection errors / timeouts).
    The original bare ``except:`` also swallowed KeyboardInterrupt, making
    the retry loop impossible to break out of with Ctrl-C; catching only
    requests' exception base class fixes that.
    """
    while True:
        try:
            r = requests.get(url, headers={'User-Agent': ua.ua,}, timeout=5)
            break
        except requests.RequestException:
            rprint('[red]连接超时，重连中。。。')
    return BeautifulSoup(r.text, 'lxml')

def web(n):
    """Scrape listing page *n* of mrcong.com, insert unseen posts into the DB,
    and return the download links of everything newly recorded.

    Scanning stops once three already-recorded posts have been seen on the
    page. NOTE(review): relies on the module-level global ``today`` (set in
    ``__main__``) for the record date — calling this outside the script
    raises NameError.
    """
    node_h2 = req(f'https://mrcong.com/page/{n}')('h2')
    names = [h2.a.string for h2 in node_h2]
    pages = [h2.a.attrs['href'] for h2 in node_h2]
    urls = []
    recorded = 0    # count of already-recorded posts encountered on this page
    counter = 0     # running link index for console output
    for name, page in zip(names, pages):
        if recorded == 3:
            if len(urls) > 0:
                rprint('[yellow]检测到三条已存在记录，进入下载程序')
            else:
                rprint('[green]无可下载内容')
            break
        # DB check first: already-recorded posts no longer cost a detail-page fetch.
        if service.query('select * from mrcong where name=?', (name,)) != []:
            recorded += 1
            rprint(f'[yellow]【{name}】已记录')
            continue
        node_p = req(page)('p', style='text-align: center;')
        # Download links live in the second half of the centred <p> tags.
        links = [file.a.attrs['href'] for file in node_p[len(node_p)//2:]]
        rprint('{}. {} ==> {}'.format(counter, name, '\n'.join(links)))
        counter += len(links)
        urls += links
        row_id = service.query2('select count(*) from mrcong')[0][0] + 1    # renamed: `id` shadowed the builtin
        service.exec('insert into mrcong(id, date, name, url, page) values (?, ?, ?, ?, ?)',
                     (row_id, today, name, '&'.join(links), page))
    return urls

def local(date):
    """Load the download links recorded in the DB for *date*, print them, and
    return them as a flat list."""
    rows = service.query('select name, url from mrcong where date=?', (date,))
    urls = []
    counter = 0    # running link index for console output
    for name, joined in rows:
        links = joined.split('&')    # links were stored '&'-joined per post
        rprint('{}. {} ==> {}'.format(counter, name, '\n'.join(links)))
        counter += len(links)
        urls.extend(links)
    return urls

def search(targets, page=1):
    """Interactively search mrcong.com listing pages for posts whose title
    contains any of *targets*, printing each match's download links.

    Starts at *page* and walks forward one listing page at a time; after each
    match the user chooses whether to keep going (Enter/y) or stop (anything
    else).

    Fixes over the original: the base URL is no longer clobbered by the
    per-match link list (which made the next page fetch raise TypeError), a
    page with no matches now advances instead of re-fetching forever, and
    duplicate titles no longer resolve to the wrong link via list.index().
    """
    base = 'https://mrcong.com/page/'
    while True:
        node_h2 = req(base + str(page))('h2')
        for h2 in node_h2:
            title, link = h2.a.string, h2.a.attrs['href']
            if not any(title.find(target) != -1 for target in targets):
                continue
            node_p = req(link)('p', style='text-align: center;')
            # Download links live in the second half of the centred <p> tags.
            links = [i.a.attrs['href'] for i in node_p[len(node_p)//2:]]
            rprint('{}: {} ==>\n{}'.format(title, link, links))
            choice = input('是否继续：')
            if choice == '' or choice in 'yY':
                continue        # keep scanning the rest of this page
            elif choice in 'nN':
                return
            else:
                rprint(f'输入有误，可重新运行，从第 {page+1} 页继续')
                return
        rprint(f'第 {page} 页检索完毕，继续下一页')
        page += 1    # always advance after a full page scan

def save_rar(browser, urls):
    """Save each shared file to "my TearBox" by driving the terabox UI.

    :param browser: a logged-in selenium Chrome driver.
    :param urls: list of 1-tuples, each holding one share URL (DB row shape).

    If a wait step times out, the function recurses on that single URL to
    retry it, then returns early so the outer call doesn't re-click the
    already-saved file.
    """
    for url in urls:
        url = url[0]    # unpack the 1-tuple DB row
        if '&' in url:
            continue        # skip multi-part (split-volume) archives
        while True:         # retry until the page actually loads (network flakiness)
            try:
                browser.get(url)
                WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//span[@class="file-item-checkbox"]')))
                break
            except:
                continue
        browser.find_element(By.XPATH, '//span[@class="file-item-checkbox"]').click()       # tick the file's checkbox
        browser.find_element(By.XPATH, '//div[@class="file-select-save"]').click()          # click "Save to my TearBox"
        try:
            WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//div[@class="common-folder-item"]')))   # wait for the save dialog to load
        except:
            save_rar(browser, [(url,)])    # dialog never appeared: retry this one URL recursively
            return      # the recursive call finished this URL; bail out so it isn't processed twice
        browser.find_element(By.XPATH, '//div[@class="create-confirm btn "]').click()       # click "Yes"
        try:        # confirm the save actually succeeded
            WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.XPATH, '//div[@class="share-save-success-body"]')))
        except:
            save_rar(browser, [(url,)])    # no success toast: retry this one URL recursively

def dl(date):
    """Open terabox in Chrome with saved cookies and save every archive
    recorded in the DB for *date* to "my TearBox".

    Blocks at the end until the user presses Enter so the browser stays open.
    """
    import ast  # function-scope import: literal_eval replaces the unsafe eval() below

    # The cookie dump is a Python-literal list of dicts; literal_eval parses it
    # without eval()'s arbitrary-code-execution risk.
    with open('d:/data/terabox_cookies.txt', 'r', encoding='utf-8') as f:
        cookies = ast.literal_eval(f.read())
    options = Options()
    options.add_argument('--start-maximized')
    options.page_load_strategy = 'eager'    # don't wait for every sub-resource
    prefs = {'download.default_directory': 'e:/'}
    options.add_experimental_option('prefs', prefs)

    browser = webdriver.Chrome(options=options)
    browser.get('https://www.terabox.com/')    # must be on the domain before cookies can be added
    browser.delete_all_cookies()
    for cookie in cookies:
        browser.add_cookie(cookie)
    # Optional: save only selected series, e.g.
    # urls = []
    # for name in ["XIUREN%", "YouMi%", "XiaoYu%", 'HuaYang']:
    #     urls += service.query('select url from mrcong where date=? and name like ?', (date, name))
    urls = service.query('select url from mrcong where date=?', (date,))    # list of 1-tuples
    print(urls)
    save_rar(browser, urls)

    browser.get('https://www.terabox.com/main?category=all')
    input('回车关闭')    # was `next = input(...)`, which shadowed the builtin

if __name__ == '__main__':
    today = time.strftime('%Y.%m.%d', time.localtime())
    choice = input('输入选择（回车=>网络，se=>搜索，其他=>本地）：')
    service.path = 'd:/data/mrcong.db'
    date = today
    urls = []    # the 'se' branch collects nothing; pre-define so the final check can't NameError

    if choice == '':
        # Walk listing pages until web() returns a short page (< 20 links).
        n = 1
        while True:
            part = web(n)
            urls += part
            if len(part) >= 20:
                print(f'检索第 {n+1} 页')
                n += 1
            else:
                break
    elif choice == 'se':
        # Collect search terms until two consecutive empty inputs.
        titles = []
        while True:
            titles.append(input('输入：'))
            if titles[-2:] == ['', '']:
                break
        search(titles[:-2])
    else:
        while True:
            day = input('输入日期（回车==>今天，-n==>过去第n天）：\n')
            if day == '':
                break
            # Parse with int() instead of eval() — the original eval'd the
            # wrong variable (`date`, e.g. '2025.01.02'), crashing on any
            # non-empty input, and eval on user input is unsafe anyway.
            try:
                offset = int(day)
            except ValueError:
                offset = None
            if offset is not None and offset < 0:
                date = time.strftime('%Y.%m.%d',
                                     time.localtime(time.mktime(time.localtime()) + offset * 24 * 60 * 60))
                break
            print('输入有误，重新输入')
        urls = local(date)

    total_num = service.query2('select count(*) from mrcong')[0][0]
    rprint(f'\n[green]【数据库中现共有 {total_num} 条数据】')

    if len(urls) > 0:
        input('回车开始下载：')    # was `next = input(...)`, which shadowed the builtin
        dl(date)
    else:
        print('无需下载文件')