# Downloader for files hosted on https://anonfiles.com/ (links scraped from mrcong.com)

import random, time
from os import chdir, system

import requests
from bs4 import BeautifulSoup
from rich import print as rprint

import sys
sys.path.append('../')
from Service import service, ua


def get_ip1(page=1):
    """Scrape one page of free proxies from kuaidaili; return 'ip:port' strings."""
    resp = requests.get(
        f'https://free.kuaidaili.com/free/inha/{page}/',
        headers={'User-Agent': ua.ua,},
    )
    doc = BeautifulSoup(resp.text, 'lxml')
    hosts = doc('td', {'data-title': 'IP'})
    ports = doc('td', {'data-title': 'PORT'})
    # index-based pairing: host i goes with port i from the same table row
    return [f'{host.text}:{ports[i].text}' for i, host in enumerate(hosts)]

def get_ip2(page=1):
    """Scrape one page of free proxies from kxdaili; return 'ip:port' strings."""
    resp = requests.get(
        f'http://www.kxdaili.com/dailiip/2/{page}.html',
        headers={'User-Agent': ua.ua,},
    )
    table_body = BeautifulSoup(resp.text, 'lxml').tbody
    proxies = []
    for row in table_body('tr'):
        # first two cells of each row are the host and the port
        host, port = (cell.string for cell in row('td')[:2])
        proxies.append(f'{host}:{port}')
    return proxies

def req(url):
    """Fetch *url* through a random proxy from the global ``ips`` pool.

    Retries with another proxy on network failure, removing dead proxies
    from the pool as it goes.

    Returns:
        BeautifulSoup: the parsed response body.

    Raises:
        RuntimeError: when every proxy in the pool has been exhausted
            (the original code fell into ``random.choice([])`` and died
            with a cryptic IndexError instead).
    """
    while True:
        if not ips:
            raise RuntimeError('代理 IP 已全部失效，无法继续请求')
        ip = random.choice(ips)
        try:
            r = requests.get(url, headers={'User-Agent': ua.ua,},
                             proxies={'http': ip}, timeout=5)
            break
        # Only network-level errors mean a dead proxy.  The original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit, making the
        # retry loop impossible to break out of with Ctrl-C.
        except requests.exceptions.RequestException:
            rprint('[red]连接超时，重连中。。。')
            ips.remove(ip)
            print(f'(移除 IP：{ip}，剩余 {len(ips)} 个IP)')
    return BeautifulSoup(r.text, 'lxml')

def dl(url, path='d:/'):
    """Dispatch a download for *url*.

    anonfiles links are resolved to their direct file link and queued in
    Internet Download Manager via its command line; gofile links are only
    printed for manual handling; anything else is silently ignored.

    Args:
        url:  page URL of the hosted file.
        path: IDM target directory (default ``d:/``).
    """
    if url.startswith('https://anonfiles.com/'):
        link = req(url).find('a', id='download-url')['href']
        name = link.split('/')[-1]

        # Raw string: the original '\P' / '\I' only worked because Python
        # preserves unknown escapes — a SyntaxWarning since 3.12.
        chdir(r'C:\Program Files (x86)\Internet Download Manager')
        system(' '.join(['IDMan.exe', '/d', link, '/p', path, '/s']))
        rprint(f'[green]{name} 添加到下载队列')
    elif url.startswith('https://gofile.io/'):
        print(url)

def web(n):
    """Scan listing page *n* of mrcong.com and record new posts.

    For every post not yet present in the ``mrcong`` table: print its
    download links, collect them, and insert a row into the database.
    Scanning stops after three already-recorded posts have been seen.

    Returns:
        list[str]: download links of the newly recorded posts.
    """
    node_h2 = req(f'https://mrcong.com/page/{n}')('h2')
    names = [h2.a.string for h2 in node_h2]
    pages = [h2.a.attrs['href'] for h2 in node_h2]
    urls = []
    known = 0    # already-recorded posts encountered so far
    counter = 0  # running index for the console listing
    for name, page in zip(names, pages):
        if known == 3:
            if urls:
                rprint('[yellow]检测到三条已存在记录，进入下载程序')
            else:
                rprint('[green]无可下载内容')
            break
        # Check the database FIRST — the original fetched every post page
        # over the (slow, proxied) network even for posts it already knew.
        if service.query('select * from mrcong where name=?', (name,)) != []:
            known += 1
            rprint(f'[yellow]【{name}】已记录')
            continue
        node_p = req(page)('p', style='text-align: center;')
        # download links live in the second half of the centered paragraphs
        links = [tag.a.attrs['href'] for tag in node_p[len(node_p) // 2:]]
        rprint('{}. {} ==> {}'.format(counter, name, '\n'.join(links)))
        counter += len(links)
        urls += links
        # `row_id`, not `id`, to avoid shadowing the builtin
        row_id = service.query2('select count(*) from mrcong')[0][0] + 1
        date = time.strftime('%Y.%m.%d', time.localtime())
        service.exec('insert into mrcong(id, date, name, url, page) values (?, ?, ?, ?, ?)',
                     (row_id, date, name, '&'.join(links), page))
    return urls

def local(date):
    """Collect the download links recorded in the database on *date*.

    Prints each record with a running link index and returns all links
    as a flat list.
    """
    rows = service.query('select name, url from mrcong where date=?', (date,))
    urls = []
    counter = 0  # running index for the console listing
    for row in rows:
        links = row[1].split('&')  # links were stored '&'-joined
        rprint('{}. {} ==> {}'.format(counter, row[0], '\n'.join(links)))
        counter += len(links)
        urls.extend(links)
    return urls

def search(targets, page=1):
    """Interactively search mrcong listing pages for matching titles.

    Any title containing one of *targets* has its post page fetched and
    its download links printed.  After a page with matches the user is
    asked whether to continue; pages without matches roll over
    automatically.

    Args:
        targets: substrings to look for in post titles.
        page:    listing page number to start from.
    """
    # BUG FIX: the original stored the base URL in `url` and then clobbered
    # it with the list of download links, so the next while-iteration did
    # req(list + str) and crashed with TypeError.
    base = 'https://mrcong.com/page/'
    while True:
        node_h2 = req(base + str(page))('h2')
        titles = [h2.a.string for h2 in node_h2]
        pages = [h2.a.attrs['href'] for h2 in node_h2]
        for i, title in enumerate(titles):
            flag = False
            for target in targets:
                if title.find(target) != -1:
                    # enumerate index, not titles.index(title): duplicate
                    # titles would otherwise always resolve to the first hit
                    link = pages[i]
                    node_p = req(link)('p', style='text-align: center;')
                    links = [p.a.attrs['href'] for p in node_p[len(node_p) // 2:]]
                    rprint('{}: {} ==>\n{}'.format(title, link, links))
                    flag = True
            if flag:
                choice = input('是否继续：')
                if choice == '' or choice in 'yY':
                    page += 1
                    continue
                elif choice in 'nN':
                    break
                else:
                    rprint(f'输入有误，可重新运行，从第 {page+1} 页继续')
                    break
        else:
            # for-loop ran to completion (no break) → go to the next page
            rprint(f'第 {page} 页检索完毕，继续下一页')
            continue
        break

if __name__ == '__main__':
    choice = input('输入选择（回车=>网络，se=>搜索，其他=>本地）：')
    service.path = 'd:/log/mrcong.db'

    # BUG FIX: `urls` was never assigned on the 'se' branch, so the code
    # below crashed with NameError after a search session.
    urls = []
    if choice == '':
        ips = get_ip1() + get_ip2()
        n = 1
        while True:
            part = web(n)
            urls += part
            # a (nearly) full page of new links suggests more pages follow
            if len(part) >= 20:
                print(f'检索第 {n+1} 页')
                n += 1
            else:
                break
    elif choice == 'se':
        ips = get_ip1() + get_ip2()
        titles = []
        while True:
            titles.append(input('输入：'))
            if titles[-2:] == ['', '']:   # two blank lines end the input
                break
        search(titles[:-2])
    else:
        while True:
            date = input('输入日期（回车==>今天，-n==>过去第n天）：\n')
            if date == '':
                date = time.strftime('%Y.%m.%d', time.localtime())
                break
            # BUG FIX: int() instead of eval() — eval of raw console input
            # is a code-execution hole and crashed on non-numeric input.
            try:
                offset = int(date)
            except ValueError:
                print('输入有误，重新输入')
                continue
            if offset < 0:
                date = time.strftime(
                    '%Y.%m.%d',
                    time.localtime(time.time() + offset * 24 * 60 * 60))
                break
            print('输入有误，重新输入')
        # BUG FIX: this branch never built the proxy pool, so any download
        # (dl -> req) died with NameError on the global `ips`.
        ips = get_ip1() + get_ip2()
        urls = local(date)

    total_num = service.query2('select count(*) from mrcong')[0][0]
    rprint(f'\n[green]【数据库中现共有 {total_num} 条数据】')

    if urls:
        start = input(f'上面有 {len(urls)} 个下载链接，输入下载范围 [start, end]：\nstart = ')
        start = 0 if start == '' else int(start)
        end = input('end = ')
        urls = urls[start:] if end == '' else urls[start:int(end) + 1]

        # BUG FIX: the original downloaded every link TWICE (a per-link
        # loop followed by a batch-of-3 loop) and its batch index table
        # raised IndexError when fewer than 3 links were selected.
        # Download once, pausing for confirmation after every 3 links.
        for m in range(0, len(urls), 3):
            for url in urls[m:m + 3]:
                dl(url)
            input('继续：')
    else:
        print('无需下载文件')
