from .db.database import DatabaseConnection
from .db.model.record import BingCrawlerRecordDAO
from .db.model.deduplication import BingCrawlerDeduplicationDAO
import requests
import os
import copy
import random
from .mimetypes import guess_ext_from_mimetype
from .lock import DownloaderLock
from impl.perf_stat.function_call import FunctionCallPerfStat, record_running_time
from contextlib import nullcontext
import time


@record_running_time
def _write_file(f, chunk):
    """Write *chunk* to the open binary file object *f*.

    Exists only so that the @record_running_time decorator can attribute
    disk-write time separately from network-read time in the perf stats.
    """
    f.write(chunk)


def _download_image(download_url, save_file_path_prefix, session, header, connection_timeout, min_size_filter, verbose_logging):
    """Download one image to ``save_file_path_prefix`` + extension.

    The file extension is derived from the response's ``content-type``
    header via ``guess_ext_from_mimetype``.  The payload is streamed to a
    ``.tmp`` file first and moved into place only once it is complete and
    at least ``min_size_filter`` bytes, so a partial download is never
    left under the final name.

    :param download_url: URL of the image to fetch.
    :param save_file_path_prefix: destination path without extension.
    :param session: ``requests.Session`` used to issue the GET.
    :param header: HTTP headers for this request.
    :param connection_timeout: (connect, read) timeout tuple.
    :param min_size_filter: minimum accepted size in bytes; smaller
        downloads are discarded.
    :param verbose_logging: print diagnostics when True.
    :return: the file extension (including leading dot) on success,
        otherwise None.
    """
    tmp_file_path = None
    try:
        # Stream the body so large images are never buffered whole in
        # memory; the with-block releases the connection on every path
        # (the original leaked it on the early returns).
        with session.get(download_url, stream=True, headers=header, timeout=connection_timeout) as image_request:
            image_request.raise_for_status()
            if "content-type" not in image_request.headers:
                if verbose_logging:
                    print(f'No content-type entry in headers, url: {download_url}, skipping')
                return None
            content_type = image_request.headers["content-type"]
            mimetype = content_type.split(';')[0].strip()
            if mimetype not in guess_ext_from_mimetype:
                if verbose_logging:
                    print(f'Unknown mimetype {mimetype} returned by {download_url}')
                return None
            extension = guess_ext_from_mimetype[mimetype]
            save_file_path = save_file_path_prefix + extension

            tmp_file_path = save_file_path + '.tmp'
            image_size = 0
            with open(tmp_file_path, 'wb') as f:
                for chunk in image_request.iter_content(chunk_size=8192):
                    image_size += len(chunk)
                    _write_file(f, chunk)
        if image_size < min_size_filter:
            os.remove(tmp_file_path)
            return None
        # os.replace() overwrites an existing target atomically on all
        # platforms, so no need to pre-delete the old file (which the
        # original did even before knowing the download would succeed).
        os.replace(tmp_file_path, save_file_path)
        return extension
    except requests.RequestException as e:
        # Don't leave a half-written temp file behind on network errors.
        if tmp_file_path is not None and os.path.exists(tmp_file_path):
            os.remove(tmp_file_path)
        if verbose_logging:
            print(f'{download_url}: {e}')
        return None


def _try_download_multiple_candidates_with_priority(candidate_urls_with_header, save_file_path_prefix, session, connection_timeout, min_size_filter, verbose_logging):
    """Attempt each (url, header) candidate in priority order.

    Stops at the first candidate that downloads successfully and returns
    its ``(file_extension, url)`` pair; returns ``(None, None)`` when
    every candidate fails.
    """
    for url, request_header in candidate_urls_with_header:
        extension = _download_image(
            url, save_file_path_prefix, session, request_header,
            connection_timeout, min_size_filter, verbose_logging)
        if extension is None:
            continue
        return extension, url
    return None, None


def _get_random_ua_header(headers: dict):
    headers = copy.copy(headers)
    user_agent = random.choice(headers['USER_AGENTS'])
    for k, v in headers.items():
        if k == 'USER_AGENTS':
            continue
        v.update(user_agent)
    del headers['USER_AGENTS']
    return headers


def _download(category_identity: str, search_word: str, workspace_dir: str, thumbnail_only: bool, engine,
              downloader_config, header_config,
              min_size_filter: int, database: DatabaseConnection, record_dao: BingCrawlerRecordDAO,
              deduplication_dao: BingCrawlerDeduplicationDAO, proxy_server_address, verbose=False):
    """Crawl one search term and download every not-yet-seen image.

    Pages through the crawl engine and, for each new image id, downloads
    the file into ``workspace_dir/category_identity/`` and persists it in
    two tables: a deduplication table (image id -> stored file name,
    flushed roughly once per second) and a per-category record table
    (inserted as one batch at the end).

    :param category_identity: category key; also the sub-directory name
        used for downloaded files.
    :param search_word: query string sent to the crawl engine.
    :param workspace_dir: root directory for downloaded images.
    :param thumbnail_only: forwarded to the crawl engine -- presumably
        restricts it to thumbnail URLs; confirm in the engine itself.
    :param engine: engine selector; only 'requests' is implemented.
    :param downloader_config: config dict; only
        ``requests.connection_timeout`` / ``requests.read_timeout`` are
        read here.
    :param header_config: header configuration incl. 'USER_AGENTS'.
    :param min_size_filter: minimum image size in bytes to keep.
    :param database: open connection, used for commit/rollback.
    :param record_dao: DAO for the per-category crawl records.
    :param deduplication_dao: DAO mapping image ids to file names.
    :param proxy_server_address: optional proxy for both http and https.
    :param verbose: enable diagnostic printing.
    :return: number of images actually downloaded during this run.
    :raises NotImplementedError: for any engine other than 'requests'.
    """
    headers = _get_random_ua_header(header_config)
    connection_timeout = (downloader_config['requests']['connection_timeout'], downloader_config['requests']['read_timeout'])
    # Continue file numbering after whatever earlier runs stored.
    downloaded_images = record_dao.count_by_identity(category_identity)
    # Image ids already handled within this run (the DB check below
    # covers earlier runs).
    downloaded_image_ids = set()
    with requests.Session() as session:
        if proxy_server_address is not None:
            proxies = {
                'http': proxy_server_address,
                'https': proxy_server_address,
            }
            session.proxies.update(proxies)
        if engine == 'requests':
            from .crawl_engine.json_api import BingImageJsonAPICrawler
            crawl_engine = BingImageJsonAPICrawler(search_word, thumbnail_only, session, headers, connection_timeout,
                                                   verbose)
        else:
            raise NotImplementedError
        # Some engines are context managers (own resources); wrap the
        # others in a no-op context so the with-statement is uniform.
        crawl_engine_context_object = crawl_engine if hasattr(crawl_engine, '__enter__') else nullcontext()
        with crawl_engine_context_object:
            new_record_list = []
            this_run_downloaded_images = 0
            new_images_list = []
            deduplication_records_timeout_begin = time.perf_counter()

            while True:
                # One page of results per call; None/empty ends the crawl.
                parsed = crawl_engine.get_next()

                if parsed is None or len(parsed) == 0:
                    break

                for index, (image_url_candidates_with_header, thumbnail_url, image_id, source_url, source_image_url, caption) in enumerate(parsed):
                    # Skip duplicates within this run, then duplicates
                    # already recorded for this category in the DB.
                    if image_id in downloaded_image_ids:
                        continue
                    else:
                        downloaded_image_ids.add(image_id)
                    if record_dao.exists(category_identity, image_id):
                        continue

                    image_file_name = deduplication_dao.get_file_name(image_id)
                    if image_file_name is None:
                        # Unknown image: download it under a sequential
                        # zero-padded name derived from the counter.
                        image_file_prefix = f'{downloaded_images:08d}'
                        save_file_path_prefix = os.path.join(workspace_dir, category_identity, image_file_prefix)
                        image_file_extension, download_url = _try_download_multiple_candidates_with_priority(image_url_candidates_with_header, save_file_path_prefix, session, connection_timeout, min_size_filter, verbose)
                        if image_file_extension is None:
                            continue

                        image_file_name = f'{category_identity}/{image_file_prefix}{image_file_extension}'
                        new_images_list.append((image_id, image_file_name))
                        # Flush pending dedup rows at most about once per
                        # second so files on disk become visible to other
                        # crawler processes without a commit per image.
                        # NOTE(review): if a later exception aborts the
                        # run, these committed dedup rows have no matching
                        # record rows (those are only inserted at the
                        # end) -- confirm that is acceptable.
                        deduplication_records_timeout_end = time.perf_counter()
                        if deduplication_records_timeout_end - deduplication_records_timeout_begin > 1:
                            try:
                                deduplication_dao.insert_multiple(new_images_list)
                                database.commit()
                                new_images_list.clear()
                                deduplication_records_timeout_begin = time.perf_counter()
                            except Exception:
                                database.rollback()
                                raise
                        downloaded_images += 1
                        this_run_downloaded_images += 1
                    else:
                        # Already stored under another category: reuse the
                        # file and record the source image URL as the
                        # "download" URL (nothing was fetched).
                        download_url = source_image_url

                    new_record_list.append((category_identity, image_id, search_word, image_file_name, download_url,
                                            source_url, source_image_url, caption))

    # Final flush: remaining dedup rows plus the whole record batch, in a
    # single transaction, after the HTTP session has been closed.
    if len(new_record_list) > 0 or len(new_images_list) > 0:
        try:
            if len(new_images_list) > 0:
                deduplication_dao.insert_multiple(new_images_list)
            if len(new_record_list) > 0:
                record_dao.insert_multiple(new_record_list)
            database.commit()
        except Exception:
            database.rollback()
            raise
    return this_run_downloaded_images


def download_image_search_result_from_bing(category_identity: str, search_word: str, workspace_dir: str,
                                           thumbnail_only: bool,
                                           db_config: dict, downloader_config: dict, header_config: dict, engine: str,
                                           proxy_server_address: str = None,
                                           min_size_filter: int = 1024, verbose: bool = False,
                                           enable_io_perf_stat: bool = False,
                                           file_lock_expired_time: float = 1800  # half hour
                                           ):
    """Entry point: crawl Bing image results for *search_word* into
    ``workspace_dir/category_identity``.

    Takes a per-category file lock first so concurrent crawlers do not
    work on the same category; returns 0 immediately when the lock is
    held elsewhere.  Otherwise opens the database, builds the DAOs and
    delegates the actual crawl to ``_download``, returning its count of
    newly downloaded images.  Exceptions are printed (when *verbose*)
    and re-raised.
    """
    try:
        if enable_io_perf_stat:
            io_stat_context = FunctionCallPerfStat(True)
        else:
            io_stat_context = nullcontext()

        with io_stat_context:
            category_dir = os.path.join(workspace_dir, category_identity)
            downloader_lock = DownloaderLock(category_dir, file_lock_expired_time)
            if not downloader_lock.try_get_lock():
                # Another process owns this category right now.
                return 0
            with downloader_lock:
                database = DatabaseConnection(db_config)
                with database:
                    return _download(category_identity, search_word, workspace_dir, thumbnail_only,
                                     engine, downloader_config, header_config, min_size_filter,
                                     database, BingCrawlerRecordDAO(database),
                                     BingCrawlerDeduplicationDAO(database), proxy_server_address, verbose)
    except Exception as e:
        if verbose:
            print(e)
        raise
