import multiprocessing
from multiprocessing.pool import ThreadPool
import threading
from functools import partial
import os
import time
import tqdm
import json
import csv

# Per-thread state: each worker thread tracks its own consecutive-failure
# count in `fail_times` (see _category_download_loop).
_thread_local_variables = threading.local()
# Number of consecutive download failures a thread tolerates before backing off.
_fault_tolerance = 100
# Running total of successfully downloaded images across all categories/threads.
_downloaded = 0


class BingDownloader:
    """Downloads image search results for one category at a time.

    When ``enable_multiprocessing`` is true, each :meth:`download` call runs
    the worker in a short-lived child process guarded by ``worker_timeout``
    seconds, so a hung worker can be killed; the worker reports its result
    through a shared ``multiprocessing.Value``. Otherwise the download runs
    in-process.
    """

    def __init__(self, workspace_dir, thumbnail_only, enable_multiprocessing,
                 database_config: dict, downloader_config: dict, header_config: dict, engine: str,
                 worker_timeout: float, proxy: str, min_size_filter: int,
                 verbose: bool, enable_io_perf_stat: bool):
        self.workspace_dir = workspace_dir
        self.thumbnail_only = thumbnail_only
        # Shared int the worker process writes its downloaded-image count into;
        # None selects the in-process code path in download().
        self.subprocess_state_value = multiprocessing.Value('i') if enable_multiprocessing else None
        self.db_config = database_config
        self.downloader_config = downloader_config
        self.header_config = header_config
        self.engine = engine
        self.worker_timeout = worker_timeout
        self.proxy = proxy
        self.min_size_filter = min_size_filter
        self.verbose = verbose
        self.enable_io_perf_stat = enable_io_perf_stat

    def download(self, category_identity, search_word):
        """Download all images for ``search_word`` under ``category_identity``.

        Returns:
            int: the number of downloaded images, or -1 when the worker
            failed or timed out.
        """
        if self.subprocess_state_value:
            from impl.worker_entry import download_worker_entry
            p = multiprocessing.Process(target=download_worker_entry,
                                        args=(category_identity, search_word, self.workspace_dir, self.thumbnail_only,
                                              self.db_config, self.downloader_config, self.header_config, self.engine,
                                              self.proxy, self.min_size_filter, self.verbose, self.enable_io_perf_stat,
                                              self.subprocess_state_value))
            p.start()
            p.join(self.worker_timeout)
            if p.exitcode != 0:
                # exitcode is None when join() timed out and the worker is
                # still running: kill it, then join again to reap the child —
                # otherwise the killed process lingers as a zombie.
                if p.exitcode is None:
                    try:
                        p.kill()
                    except Exception:
                        pass
                    p.join()
                return -1
            return self.subprocess_state_value.value
        else:
            from impl.bing_downloader import download_image_search_result_from_bing
            return download_image_search_result_from_bing(category_identity, search_word, self.workspace_dir,
                                                          self.thumbnail_only, self.db_config, self.downloader_config,
                                                          self.header_config, self.engine, self.proxy,
                                                          self.min_size_filter, self.verbose, self.enable_io_perf_stat)


def load_keywords(file_path: str):
    """Read search keywords from a CSV file.

    Each CSV row becomes one list of keyword strings; rows are returned in
    file order.
    """
    with open(file_path, 'r', newline='', encoding='utf-8') as fp:
        return list(csv.reader(fp))


def load_identities(file_path: str):
    """Read category identities, one per line.

    Blank lines and surrounding whitespace are ignored.

    Returns:
        list[str]: the non-empty, stripped lines in file order.
    """
    with open(file_path, 'r', newline='', encoding='utf-8') as f:
        # Iterate the file directly instead of a manual readline()/break
        # loop; the for-loop terminates cleanly at EOF.
        return [stripped for line in f if (stripped := line.strip())]


# Guards the shared `_downloaded` counter: `+=` on a module global is a
# read-modify-write and is NOT atomic when several pool threads run this loop.
_downloaded_lock = threading.Lock()


def _category_download_loop(downloader, process_bar, category_identity, category_keywords):
    """Download every keyword of one category, tracking per-thread failures.

    On success the per-thread failure counter resets and the global total is
    bumped; after ``_fault_tolerance`` consecutive failures the thread backs
    off with a long sleep, then resumes with a half-reset counter.
    """
    global _downloaded
    for keyword in category_keywords:
        process_bar.set_description(f'Downloading: {keyword}({category_identity})')
        downloaded_images = downloader.download(category_identity, keyword)
        if downloaded_images != -1:
            _thread_local_variables.fail_times = 0
            with _downloaded_lock:
                _downloaded += downloaded_images
                total = _downloaded
            process_bar.set_postfix_str(str(total))
        else:
            # Count consecutive failures per thread.
            _thread_local_variables.fail_times = getattr(_thread_local_variables, 'fail_times', 0) + 1
            if _thread_local_variables.fail_times >= _fault_tolerance:
                sleep_time = 200
                print(f'thread {threading.current_thread().ident} failed for {_fault_tolerance} times, sleep {sleep_time}s')
                time.sleep(sleep_time)
                # Half-reset (integer //, not float /) so the thread does not
                # sleep again on the very next failure.
                _thread_local_variables.fail_times = _fault_tolerance // 2
        process_bar.update()


def download(keyword_file, category_id_file, workspace_dir, engine, thumbnail_only: bool = False,
             enable_multiprocessing: bool = True,
             num_threads: int = 0, slice_begin: int = None, slice_end: int = None,
             worker_timeout: float = 360, proxy: str = None,
             min_size_filter: int = 1024,
             verbose: bool = False, enable_io_perf_stat: bool = False,
             infinite_loop: bool = False):
    """Drive a full download run over every (identity, keywords) category.

    Loads keywords and identities, drops empty-keyword entries, reads the
    JSON configs next to this script, then downloads each category either
    sequentially (``num_threads == 0``) or on a thread pool. With
    ``infinite_loop`` the whole pass repeats forever.

    Raises:
        ValueError: if the identity file and keyword file disagree in length.
    """
    keywords = load_keywords(keyword_file)
    if category_id_file is None:
        # No identity file: synthesize zero-padded sequential identities.
        category_identities = [f"{i:09}" for i in range(len(keywords))]
    else:
        category_identities = load_identities(category_id_file)
        # Explicit validation instead of `assert` (asserts vanish under -O).
        if len(category_identities) != len(keywords):
            raise ValueError(
                f'identity count ({len(category_identities)}) does not match '
                f'keyword row count ({len(keywords)})')

    if slice_begin is not None or slice_end is not None:
        keywords = keywords[slice_begin: slice_end]
        category_identities = category_identities[slice_begin: slice_end]

    def _no_empty_entries(keywords, category_identities):
        # Drop categories whose CSV row had no keywords at all.
        new_keywords = []
        new_category_identities = []
        for keyword, category_identity in zip(keywords, category_identities):
            if len(keyword) > 0:
                new_keywords.append(keyword)
                new_category_identities.append(category_identity)
            else:
                print(f'Omitting {category_identity} for empty search keywords')
        return new_keywords, new_category_identities

    keywords, category_identities = _no_empty_entries(keywords, category_identities)

    def _load_json_config(file_name):
        # Config files live next to this script.
        config_path = os.path.join(os.path.dirname(__file__), file_name)
        with open(config_path, encoding='utf-8') as f:
            return json.load(f)

    database_config = _load_json_config('db_config.json')
    downloader_config = _load_json_config('config.json')
    header_config = _load_json_config('header.json')

    downloader = BingDownloader(workspace_dir, thumbnail_only,
                                enable_multiprocessing, database_config, downloader_config, header_config,
                                engine, worker_timeout, proxy, min_size_filter,
                                verbose, enable_io_perf_stat)

    with tqdm.tqdm(total=len(category_identities)) as process_bar:
        while True:
            download_func = partial(_category_download_loop, downloader, process_bar)
            if num_threads == 0:
                # Sequential mode: run each category on the calling thread.
                for category_identity, category_keywords in zip(category_identities, keywords):
                    download_func(category_identity, category_keywords)
            else:
                with ThreadPool(num_threads) as pool:
                    pool.starmap(download_func, zip(category_identities, keywords))
            if infinite_loop:
                # Start a fresh pass over the same categories.
                process_bar.reset(total=len(category_identities))
            else:
                break


if __name__ == '__main__':
    # Script-only dependency: importing argparse here keeps the module's
    # import side effects minimal when this file is imported elsewhere.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('keyword_file', type=str, help='Path to category keywords file')
    parser.add_argument('workspace_dir', type=str, help='Path to store images')
    parser.add_argument('--engine', type=str, choices=['requests'], default='requests')
    parser.add_argument('--category-id-file', type=str, help='Path to category identity file')
    parser.add_argument('--slice-begin', type=int, help='Begin index of categories')
    parser.add_argument('--slice-end', type=int, help='End index of categories')
    parser.add_argument('--num-threads', type=int, default=0, help='Number of concurrent threads')
    parser.add_argument('--disable-multiprocessing', action='store_true', help='Disable multiprocessing')
    parser.add_argument('--enable-io-perf-stat', action='store_true',
                        help='Enable I/O related operation performance statistics')
    parser.add_argument('--worker-timeout', type=float, default=1800)
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--infinite-loop', action='store_true')
    parser.add_argument('--min-size-filter', type=int, default=1024, help='Minimum size of image')
    parser.add_argument('--proxy', type=str, help='proxy server address, eg: http://127.0.0.1:7080')
    parser.add_argument('--thumbnail-only', action='store_true')
    args = parser.parse_args()
    # Note: the CLI exposes --disable-multiprocessing, so the flag is inverted
    # before being handed to download().
    download(args.keyword_file, args.category_id_file, args.workspace_dir, args.engine, args.thumbnail_only,
             not args.disable_multiprocessing, args.num_threads,
             args.slice_begin, args.slice_end, args.worker_timeout, args.proxy,
             args.min_size_filter, args.verbose, args.enable_io_perf_stat, args.infinite_loop)
