#!/usr/bin/env python
# coding=utf-8

import os
import requests
from concurrent.futures.thread import ThreadPoolExecutor
import time

from find_links_in_file import find_links_in_file


def _find_links_in_directory(dir_path: str) -> dict:
    ret = dict()
    file_paths = list()

    for root, dirs, files in os.walk(dir_path):
        # 必须忽略.git 目录
        if root.endswith('.git'):
            continue
        # 处理的文档为markdown文档
        for file_name in files:
            if not file_name.endswith('.md'):
                continue
            file_path = os.path.join(root, file_name)
            file_paths.append(file_path)
    for file_path in file_paths:
        links = find_links_in_file(file_path)
        ret[file_path] = links
    return ret


def _check_valid_file_path(file: str, link: str) -> bool:
    dir_path = os.path.dirname(file)
    join_path = os.path.join(dir_path, link)
    return os.path.exists(join_path)


def _check_valid_url(url: str, files: list) -> tuple:
    """Fetch *url* and report whether it answered with HTTP 200.

    Parameters
    ----------
    url : the URL to probe.
    files : the files that reference this URL; passed through unchanged
        so the caller can map failures back to their source files.

    Returns
    -------
    A ``(url, files, ok)`` tuple where ``ok`` is True only for a 200 response.
    """
    print(f"Process {url}...")
    try:
        # A timeout is essential: without one, a single stalled server
        # would hang its worker thread (and the whole scan) indefinitely.
        response = requests.get(url, timeout=10)
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer silently swallowed.
        return url, files, False
    return url, files, response.status_code == 200


def _check_urls(urls: dict) -> list:
    ret = list()
    time_start = time.time()
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(_check_valid_url, url, files) for url, files in urls.items()]
        pool.shutdown(wait=True)
        for fut in futures:
            url_status = fut.result()
            if not url_status[2]:
                ret.append((url_status[0], url_status[1]))
    time_stop = time.time()
    print(f"Url Comsumes time {time_stop - time_start} seconds")

    return ret


def find_error_links_in_directory(dir_path: str) -> dict:
    """Scan a directory tree of Markdown files and report broken links.

    Local (relative) links are checked directly against the filesystem;
    http(s) links are deduplicated and verified concurrently afterwards.

    Parameters
    ----------
    dir_path : root directory to scan.

    Returns
    -------
    A dict mapping file path -> list of broken links found in that file.
    Empty when *dir_path* is not an existing directory.
    """
    ret = dict()
    # os.path.isdir already implies existence, so the previous extra
    # os.path.exists check was redundant.
    if not os.path.isdir(dir_path):
        return ret

    url_links = dict()  # url -> list of files that reference it
    links_dict = _find_links_in_directory(dir_path)
    for file_path, links in links_dict.items():
        err_links = list()
        for link in links:
            # Remote links are batched and checked concurrently below.
            if link.startswith(("https:", "http:")):
                referencing_files = url_links.setdefault(link, list())
                if file_path not in referencing_files:
                    referencing_files.append(file_path)
            elif not _check_valid_file_path(file_path, link):
                err_links.append(link)
        if err_links:
            ret[file_path] = err_links

    # Fold the broken-URL results back into the per-file error mapping.
    for url, err_file_paths in _check_urls(url_links):
        for file_path in err_file_paths:
            ret.setdefault(file_path, list()).append(url)

    return ret


if __name__ == "__main__":
    # Manual smoke test: scan the local 'data' directory and dump the result.
    errors = find_error_links_in_directory('data')
    print(errors)
