import multiprocessing
from multiprocessing.pool import ThreadPool
import threading
from functools import partial
import os
import time
import tqdm
import json
from impl.common import DownloaderState
import csv

_thread_local_variables = threading.local()
_fault_tolerance = 100


class BaiduDownloader:
    """Downloads Baidu image-search results for one category at a time.

    When ``enable_multiprocessing`` is enabled, each download runs in a
    child process that can be killed after ``worker_timeout`` seconds,
    so a hung worker cannot stall the whole crawl; otherwise the
    download runs in-process.
    """

    def __init__(self, workspace_dir, enable_multiprocessing=True, proxy_address=None, headless=False,
                 database_config: dict = None, enable_io_perf_stat: bool = False, rich_log: bool = False,
                 new_requests_launch_interval: float = 0.5, retry_times_on_empty_response: int = 20,
                 worker_timeout: int = 360):
        self.workspace_dir = workspace_dir
        # Shared integer the worker process uses to report how many images it
        # saved; None means "run in-process" (no subprocess supervision).
        self.subprocess_state_value = multiprocessing.Value('i') if enable_multiprocessing else None
        self.proxy_address = proxy_address
        self.headless = headless
        self.db_config = database_config
        self.enable_io_perf_stat = enable_io_perf_stat
        self.rich_log = rich_log
        self.new_requests_launch_interval = new_requests_launch_interval
        self.retry_times_on_empty_response = retry_times_on_empty_response
        self.worker_timeout = worker_timeout

    def download(self, category_identity, search_name, target_number):
        """Download up to ``target_number`` images for ``search_name``.

        :param category_identity: identity string of the target category.
        :param search_name: keyword submitted to Baidu image search.
        :param target_number: desired number of images for the category.
        :return: a ``DownloaderState`` member describing the outcome.
        """
        if self.subprocess_state_value:
            from impl.worker_entry import download_worker_entry
            # Reset the shared counter before launching, so a stale value
            # left by a previous run cannot be mistaken for this run's result.
            self.subprocess_state_value.value = 0
            p = multiprocessing.Process(target=download_worker_entry,
                                        args=(category_identity, search_name, self.workspace_dir, self.db_config, target_number,
                                              self.proxy_address, self.headless, self.enable_io_perf_stat, self.rich_log,
                                              self.new_requests_launch_interval, self.retry_times_on_empty_response,
                                              self.subprocess_state_value))
            p.start()
            p.join(self.worker_timeout)
            if p.exitcode != 0:
                # exitcode is None when join() timed out: the worker is hung,
                # so terminate it forcefully before reporting failure.
                if p.exitcode is None:
                    try:
                        p.kill()
                    except Exception:
                        pass
                # BUG FIX: the original returned a ``(state, 0)`` tuple here
                # while every other branch returns a bare state, which broke
                # callers' ``state != DownloaderState.Fail`` comparisons.
                return DownloaderState.Fail
            downloaded_images = self.subprocess_state_value.value
            if downloaded_images >= target_number:
                return DownloaderState.Done
            elif downloaded_images > 0:
                return DownloaderState.Unfinished
            elif downloaded_images == 0:
                return DownloaderState.Fail
            else:
                # Negative counts appear to be the worker's "skipped" signal
                # — TODO(review): confirm against impl.worker_entry.
                return DownloaderState.Skipped
        else:
            from impl.baidu import download_image_search_result_from_baidu
            state, _ = download_image_search_result_from_baidu(category_identity, search_name, self.workspace_dir,
                                                               self.db_config, target_number,
                                                               self.proxy_address,
                                                               self.headless, self.enable_io_perf_stat, self.rich_log,
                                                               self.new_requests_launch_interval,
                                                               self.retry_times_on_empty_response)
            return state


def load_keywords(file_path: str):
    """Load per-category keyword lists from a CSV file.

    Each non-empty CSV row becomes one list of keyword strings; empty
    rows are skipped.

    :param file_path: path to the UTF-8 encoded CSV keyword file.
    :return: list of rows, each a list of keyword strings.
    """
    with open(file_path, 'r', newline='', encoding='utf-8') as f:
        # Truthiness test replaces the original ``len(row) == 0`` check;
        # the comprehension replaces the manual append loop.
        return [row for row in csv.reader(f) if row]


def load_identities(file_path: str):
    """Load category identities from a text file, one per line.

    Blank lines are skipped; surrounding whitespace is stripped.

    :param file_path: path to the UTF-8 encoded identity file.
    :return: list of identity strings.
    """
    with open(file_path, 'r', newline='', encoding='utf-8') as f:
        # Iterate the file directly instead of the original manual
        # ``readline()``-until-empty loop.
        return [line.strip() for line in f if line.strip()]


def _download_keywords_on_baidu_image(downloader, target_number, process_bar, category_identity, category_keywords):
    """Download every keyword of one category; return the last state.

    Tracks consecutive failures in thread-local storage and backs off
    (sleeps) once ``_fault_tolerance`` consecutive failures accumulate.

    :param downloader: ``BaiduDownloader`` instance to delegate to.
    :param target_number: desired number of images per category.
    :param process_bar: tqdm bar; advanced once per category.
    :param category_identity: identity string of the category.
    :param category_keywords: iterable of keyword strings for the category.
    :return: the ``DownloaderState`` of the last keyword attempted, or
        ``None`` when ``category_keywords`` is empty.
    """
    downloader_state = None
    for keyword in category_keywords:
        process_bar.set_description(f'Downloading: {keyword}({category_identity})')
        downloader_state = downloader.download(category_identity, keyword, target_number)
        if downloader_state != DownloaderState.Fail:
            # Any non-failure resets this thread's consecutive-failure counter.
            _thread_local_variables.fail_times = 0
        else:
            _thread_local_variables.fail_times = getattr(_thread_local_variables, 'fail_times', 0) + 1
            if _thread_local_variables.fail_times >= _fault_tolerance:
                # Too many consecutive failures: back off, then halve the
                # counter so downloading can resume with some tolerance left.
                time.sleep(200)
                _thread_local_variables.fail_times = _fault_tolerance / 2
    # BUG FIX: the original updated the bar and returned INSIDE the loop,
    # so only the first keyword of each category was ever attempted.
    process_bar.update()
    return downloader_state


def download(keyword_file, category_id_file, workspace_dir, desire_num_per_category: int,
             enable_mysql: bool, enable_multiprocessing: bool = True, proxy_address: str = None, headless: bool = False,
             num_threads: int = 0, slice_begin: int = None, slice_end: int = None, enable_io_perf_stat: bool = False,
             rich_log: bool = False,
             new_requests_launch_interval: float = 0.5, retry_times_on_empty_response: int = 20, worker_timeout: int = 360):
    """Download images for every category, retrying until all finish.

    Keywords come from ``keyword_file`` (one CSV row per category);
    identities come from ``category_id_file`` (one per line) or are
    generated as zero-padded sequence numbers. The whole category list
    is re-run until every category reports ``Done`` or ``Skipped``.

    :param num_threads: 0 processes categories sequentially; otherwise a
        thread pool of this size is used.
    :param slice_begin: optional start index restricting the categories.
    :param slice_end: optional end index restricting the categories.
    :raises ValueError: when keyword and identity files differ in length.
    """
    keywords = load_keywords(keyword_file)
    if category_id_file is None:
        # No identity file: synthesize zero-padded sequential identities.
        category_identities = [f"{i:09}" for i in range(len(keywords))]
    else:
        category_identities = load_identities(category_id_file)
        # Explicit validation instead of ``assert`` so the check survives
        # running under ``python -O``.
        if len(keywords) != len(category_identities):
            raise ValueError('keyword file and category identity file must contain the same number of entries')

    if slice_begin is not None or slice_end is not None:
        keywords = keywords[slice_begin: slice_end]
        category_identities = category_identities[slice_begin: slice_end]

    database_config = None
    if enable_mysql:
        # db_config.json is expected to sit next to this script.
        json_file_path = os.path.join(os.path.dirname(__file__), 'db_config.json')
        with open(json_file_path, encoding='utf-8') as f:
            database_config = json.load(f)

    downloader = BaiduDownloader(workspace_dir,
                                 enable_multiprocessing, proxy_address, headless, database_config, enable_io_perf_stat,
                                 rich_log,
                                 new_requests_launch_interval, retry_times_on_empty_response, worker_timeout)

    while True:
        with tqdm.tqdm(total=len(category_identities)) as process_bar:
            download_func = partial(_download_keywords_on_baidu_image, downloader, desire_num_per_category,
                                    process_bar)
            if num_threads == 0:
                states = [download_func(category_identity, category_keywords)
                          for category_identity, category_keywords in zip(category_identities, keywords)]
            else:
                with ThreadPool(num_threads) as pool:
                    states = pool.starmap(download_func, zip(category_identities, keywords))
        # Generator instead of a throwaway list; stop retrying once every
        # category has finished or was deliberately skipped.
        if all(state in (DownloaderState.Done, DownloaderState.Skipped) for state in states):
            break


import argparse


if __name__ == '__main__':
    # Command-line front-end: parse options and hand everything to download().
    cli = argparse.ArgumentParser()
    # Required positional arguments.
    cli.add_argument('keyword_file', type=str, help='Path to keyword file')
    cli.add_argument('workspace_dir', type=str, help='Path to store images')
    cli.add_argument('number_per_category', type=int, help='Number of images per category')
    # Category selection.
    cli.add_argument('--category-id-file', type=str, help='Path to category identity file')
    cli.add_argument('--slice-begin', type=int, help='Begin index of categories')
    cli.add_argument('--slice-end', type=int, help='End index of categories')
    # Execution model.
    cli.add_argument('--num-threads', type=int, default=0, help='Number of concurrent threads')
    cli.add_argument('--disable-multiprocessing', action='store_true', help='Disable multiprocessing')
    # Network / browser options.
    cli.add_argument('--proxy', type=str, help='Proxy address')
    cli.add_argument('--headless', action='store_true', help='Running chrome in headless mode')
    # Storage and diagnostics.
    cli.add_argument('--use-mysql', action='store_true', help='Using MySQL to store meta data')
    cli.add_argument('--io-perf-stat', action='store_true', help='Enable I/O related operation performance statistics')
    cli.add_argument('--new-requests-launch-interval', type=float, default=0.5)
    cli.add_argument('--retry-times-on-empty-response', type=int, default=20)
    cli.add_argument('--enable-rich-log', action='store_true')
    cli.add_argument('--worker-timeout', type=int, default=360)
    options = cli.parse_args()
    download(options.keyword_file, options.category_id_file, options.workspace_dir, options.number_per_category,
             options.use_mysql, not options.disable_multiprocessing, options.proxy, options.headless,
             options.num_threads, options.slice_begin, options.slice_end, options.io_perf_stat,
             options.enable_rich_log, options.new_requests_launch_interval, options.retry_times_on_empty_response,
             options.worker_timeout)
