import concurrent.futures as cf
import os
import socket
import sqlite3
import time
import urllib.request as ur
from datetime import datetime
from random import randint

import requests
from bs4 import BeautifulSoup
from requests import exceptions

from config import *

# Apply the configured timeout (seconds, from config) to every socket
# operation process-wide — this also bounds urllib.request downloads below.
socket.setdefaulttimeout(timeout)


# 数据库创建及插入信息
def com_log(init_details, database_name, table_name, from_tmp=True):
    """Create the cache database (if needed), store the initial detail list,
    and return the list of details that still need downloading.

    :param init_details: initial details scraped from the catalogue pages —
        each element is a tuple of (relative detail-page link, publish date)
    :param database_name: cache database file name
    :param table_name: cache table name
    :param from_tmp: resume from the existing cache instead of starting fresh
    :return: list of (re_link, dir) tuples to download; empty list when the
        user declines a fresh download
    """
    conn = sqlite3.connect(database_name)
    try:
        cursor = conn.cursor()

        # Create the cache table on first run (name bound as a parameter).
        cursor.execute(
            "SELECT count(*) FROM sqlite_master WHERE type = 'table' AND name = ?",
            (table_name,),
        )
        if not cursor.fetchone()[0]:
            cursor.execute(table_create)
            conn.commit()  # persist the DDL before any early return

        if from_tmp:
            # Resume mode: every record whose status is non-zero still needs
            # downloading; return its relative link and date directory.
            cursor.execute(f'SELECT * FROM {table_name} WHERE status <> 0')
            return [(row[0], row[2]) for row in cursor.fetchall() if row[1]]

        # Fresh-download mode: wipe the cache only after explicit confirmation.
        ensure = input("The download recodings will DELETE COMPLETELY!\nContinue? (Y/[n])\n")
        if ensure != 'Y':
            return []  # declined — nothing to download (falsy, like the old None)

        cursor.execute(f"DELETE FROM {table_name}")
        # Parameterized insert: status 2 marks "not downloaded yet".
        cursor.executemany(
            f'INSERT INTO {table_name} VALUES(?, 2, ?)',
            init_details,
        )
        print(f"写入新记录{len(init_details)}条.")
        conn.commit()
        return init_details
    finally:
        conn.close()  # released on every path, including the declined one




# 更新数据库信息
def update(database_name, table_name, re_link, status):
    """Update a record's download status after a download attempt.

    :param database_name: database file name
    :param table_name: table name
    :param re_link: relative link (primary key)
    :param status: new status value
    """
    conn = sqlite3.connect(database_name)
    try:
        # The table name cannot be bound as a parameter; the values are, which
        # avoids both quoting bugs and SQL injection through re_link.
        conn.execute(
            f'UPDATE {table_name} SET status = ? WHERE re_link = ?;',
            (status, re_link),
        )
        conn.commit()
    finally:
        conn.close()


def get_info(cata_link, domain):
    """Fetch one catalogue page and pair each detail-page link with its date.

    :param cata_link: relative link of the catalogue page
    :param domain: site domain
    :return: list of (detail-page href, date-directory string) tuples
    """
    response = requests.get(domain + cata_link, headers=headers, timeout=timeout)
    page = BeautifulSoup(response.text, 'lxml')

    # Relative links to the detail pages.
    links = []
    for anchor in page.select(detail_link_selector):
        links.append(anchor.get('href'))

    # Publish dates: last whitespace-separated token, stripped of its
    # surrounding brackets, then normalized through the configured pattern.
    dirs = []
    for tag in page.select(date_selector):
        raw = tag.get_text().split()[-1][1:-1]
        dirs.append(date_pattern.match(raw).group())

    # Polite random delay of 1.0–3.0 s between catalogue requests.
    time.sleep(randint(10, 30) * 0.1)
    return list(zip(links, dirs))


def download(detail, download_dir):
    """Download one listening item (transcript + audio), updating the cache DB.

    :param detail: detail tuple of (relative detail-page link, date directory)
    :param download_dir: root download directory
    :return: (file name, bytes occupied) on success; (detail-page URL, 0) on
        any failure
    """

    # Create the per-date local directory. makedirs(exist_ok=True) is both
    # race-safe (replaces the old mkdir + FileExistsError dance) and creates
    # any missing intermediate directories, which bare os.mkdir cannot.
    dir_path = download_dir + detail[1]
    os.makedirs(dir_path, exist_ok=True)

    # Throttle before hitting the detail page.
    time.sleep(randint(10, 30) * 0.1)
    url = domain + detail[0][1:]  # detail-page URL
    try:
        soup = BeautifulSoup(requests.get(url, headers=headers, timeout=timeout).text, 'lxml')
        voice_link = soup.select(voice_link_selector)[0].get('href')    # audio link
        file_name = voice_link.split("/")[-1][:-4]                      # name, extension stripped
        file_path = dir_path + "/" + file_name                          # local path (no extension)

        # Transcript text.
        text_list = [item.get_text() for item in soup.select(text_selector)]
        with open(file_path + ".txt", 'w', encoding='utf8') as f:
            f.write("\n".join(text_list))
        update(database_name, table_name, detail[0], 1)     # status 1: text done
        txt_size = os.path.getsize(file_path + ".txt")      # text file size

        # Audio file, after another polite delay.
        time.sleep(randint(10, 30) * 0.1)
        ur.urlretrieve(voice_link, file_path + ".mp3")
        update(database_name, table_name, detail[0], 0)     # status 0: fully done
        voice_size = os.path.getsize(file_path + ".mp3")    # audio file size

        return file_name, txt_size + voice_size
    except Exception:
        # Deliberate best-effort: the caller counts and reports failures.
        return url, 0



def get_details(domain, init_link):
    """Scrape the initial detail list from the catalogue pages.

    :param domain: site domain
    :param init_link: relative link of the entry page
    :return: list of (detail-page link, date directory) tuples; an empty list
        on network failure so callers can slice/iterate safely
    """
    # Collect the catalogue page links.
    print("正在获取目录页链接...")
    try:
        init_html = requests.get(domain + init_link, headers=headers, timeout=timeout)
        init_soup = BeautifulSoup(init_html.text, 'lxml')
        page_list = init_soup.select(cata_selector)
        cata_links = [page.get('href') for page in page_list]
        time.sleep(3)
        print("目录页链接获取完毕.")

        # Fan out over the catalogue pages to collect detail-page links.
        with cf.ThreadPoolExecutor() as t_executor1:
            details = []
            future_to_page = [t_executor1.submit(get_info, cata_link, domain) for cata_link in cata_links]
            print("正在获取详情页链接列表...")
            for future in cf.as_completed(future_to_page):
                details += future.result()
            print("详情页链接列表获取完毕.")
        return details
    except Exception:
        print("链接获取超时！将利用数据库缓存信息下载...")
        # Return an empty list (not None) so the caller's slice cannot crash;
        # the cache database is used instead.
        return []


def downloads(d2d):
    """Download every item in *d2d* concurrently, printing live progress and
    writing failed URLs to the configured failure file.

    :param d2d: list of detail tuples to download.
    """
    total = len(d2d)
    with cf.ThreadPoolExecutor() as pool:
        pending = [pool.submit(download, item, download_dir) for item in d2d]
        bytes_done = 0
        failures = []
        started = datetime.now()
        for idx, fut in enumerate(cf.as_completed(pending), 1):
            elapsed = datetime.now() - started
            name, size = fut.result()
            if not size:
                # A zero size marks a failed download; `name` is then the URL.
                failures.append(name)
                print(f"{idx}: 下载错误! \n网址: {name}")
                continue
            print(f"{idx}: ")
            bytes_done += size
            # Linear projections from the completed fraction.
            projected = bytes_done / idx * total
            remaining = elapsed / idx * (total - idx)
            print("共{}个, 第{}个, 已花费{}\b\b\b\b, 剩余时间{}\b\b\b\b; 已占用{:.2f} MB, 预计占用总空间{:.2f} MB. {} download complete."
                  .format(total, idx, elapsed, remaining, bytes_done / 1048576, projected / 1048576, name))
        print(f"下载成功{total - len(failures)}个, 失败{len(failures)}个.")
        if failures:
            with open(failed, "w", encoding="utf-8") as f:
                f.write("\n".join(failures))
            print(f"查看{failed}获取失败详情.")


if __name__ == '__main__':
    # Resume mode: the detail list comes from the cache DB, so skip scraping.
    if from_tmp:
        init_details = []
    else:
        # get_details can yield None on a network failure; fall back to an
        # empty list so the slice below cannot raise TypeError — com_log then
        # reports no resources and the user can retry with the cache.
        init_details = (get_details(domain, init_link) or [])[:num]
    details = com_log(init_details, database_name, table_name, from_tmp=from_tmp)
    if details:
        print(f"有{len(details)}份资源可下载.")
        details_to_download = details
        print(f"即将下载{len(details_to_download)}份听力材料.")
        downloads(details_to_download)
    else:
        print("无可用资源，请检查配置！")
