import numpy as np
import urllib.parse
import traceback
import os
from contextlib import nullcontext
from .perf_stat.function_call import FunctionCallPerfStat
from .web_driver import get_default_web_driver
from .io import DownloaderIOOps
from .common import DownloaderState
from typing import Dict
from seleniumwire.request import Response
import json
import time
from ._baidu_misc import _parse_baidu_json, _get_valid_json
from datetime import datetime

# Global debug switch: when True, unparseable Baidu responses are dumped to
# disk for inspection and exceptions in the retry loop are re-raised instead
# of being swallowed after logging.
debug = True


class _ImageContext:
    """Accumulated state for a single search-result image.

    Instances are stored in a dict keyed by thumbnail URL.  Fields are filled
    in piecemeal, since the metadata (from the page/XHR JSON) and the image
    bytes (from the image request) can arrive in either order.
    """
    # Raw image bytes, taken from the image request's response body.
    content: bytes
    # Source page URL from the parsed Baidu result tuple.
    source_url: str
    # Source image URL from the parsed Baidu result tuple.
    source_image_url: str
    # Caption text from the parsed Baidu result tuple.
    caption: str


def _check_has_valid_response(request):
    response: Response = request.response

    if response is None:
        return False

    if response.status_code < 200 or response.status_code >= 400:
        return False

    if response.body is None or len(response.body) == 0:
        return False

    return True


def _check_has_valid_response_and_image_type(request):
    """Return True when *request* has a valid response whose declared
    Content-Type is an image (``image/...``)."""
    if not _check_has_valid_response(request):
        return False

    content_type = request.response.headers['Content-Type']
    if content_type is None:
        # Header absent entirely.
        return False

    # Strip any parameters ("image/jpeg; charset=...") down to the media type.
    media_type = content_type.split(';', 1)[0].strip()
    return media_type.startswith('image')


def _get_file_name_from_url(url):
    return os.path.basename(urllib.parse.urlparse(url).path)


def _save_downloaded_images(io_operator: DownloaderIOOps, downloaded: set, contexts, num_downloaded_images,
                            target_number, disp_prefix: str):
    """Persist each completed image (bytes + metadata) through *io_operator*.

    ``downloaded`` holds thumbnail URLs whose contexts are complete.
    ``num_downloaded_images`` must be a 0-d numpy array (the entry point wraps
    the count with ``np.asarray``): the in-place ``+=`` below mutates it so
    the updated count is visible to the caller — a plain int would not be.
    ``disp_prefix`` of None disables progress printing.
    """
    for downloaded_thumbnail_url in downloaded:
        file_name = _get_file_name_from_url(downloaded_thumbnail_url)
        if io_operator.has_file(file_name):
            # Already saved (e.g. by a previous run): drop the context and do
            # not increment the counter for it.
            del contexts[downloaded_thumbnail_url]
            continue
        image_context: _ImageContext = contexts[downloaded_thumbnail_url]
        io_operator.save(file_name, image_context.content)
        io_operator.save_meta(file_name, downloaded_thumbnail_url, image_context.source_url,
                              image_context.source_image_url, image_context.caption)
        # In-place update of the caller's 0-d array counter.
        num_downloaded_images += 1
        if disp_prefix is not None:
            disp_string = f'{disp_prefix}: {num_downloaded_images}/{target_number} {file_name}'
            print(disp_string)


def _merge_results_with_downloader_contexts(results, contexts):
    """Fold parsed Baidu result tuples into *contexts*.

    Creates a context per previously unseen thumbnail URL and refreshes the
    metadata of existing ones (content is never overwritten here).  Returns
    the thumbnail URLs whose image bytes are already available.
    """
    completed = []
    for thumbnail_url, image_content, caption, source_url, source_image_url in results:
        context = contexts.get(thumbnail_url)
        if context is None:
            context = _ImageContext()
            context.content = image_content
            contexts[thumbnail_url] = context
        context.caption = caption
        context.source_url = source_url
        context.source_image_url = source_image_url
        if context.content is not None:
            completed.append(thumbnail_url)
    return completed


def _merge_image_raw_with_downloader_contexts(thumbnail_url, image_raw, contexts):
    """Attach raw image bytes to the context for *thumbnail_url*.

    Returns True when the metadata was already present (the image is now
    complete and can be saved), False when the bytes arrived before the
    metadata.
    """
    if thumbnail_url not in contexts:
        # Image bytes arrived before the page/XHR metadata: remember them so
        # the later metadata merge finds the content instead of the image
        # needing to be fetched again.
        image_context = _ImageContext()
        image_context.caption = None
        image_context.source_url = None
        image_context.source_image_url = None
        image_context.content = image_raw
        # BUG FIX: the freshly built context was never stored, silently
        # dropping the downloaded bytes.
        contexts[thumbnail_url] = image_context
        return False
    else:
        image_context = contexts[thumbnail_url]
        image_context.content = image_raw
        return True


def _parse_web_page(request, contexts: Dict[str, _ImageContext], downloaded_urls: set):
    """Extract the inline ``imgData`` JSON from a Baidu results page and merge
    its entries into *contexts*; completed URLs go into *downloaded_urls*.

    On a JSON parse failure the raw page is dumped to an .html file when the
    module-level ``debug`` flag is set, and the error is re-raised.
    """
    if not _check_has_valid_response(request):
        return

    body = request.response.body
    start_marker = b"app.setData('imgData', "
    start = body.find(start_marker)
    assert start != -1
    start += len(start_marker)

    # The blob ends at the ');' closing this setData call.  When another
    # setData call follows, search backwards from it for the closer;
    # otherwise take the first ');' after the payload.
    next_call = body.find(b'app.setData(', start)
    if next_call != -1:
        end = body.rfind(b');', start, next_call)
    else:
        end = body.find(b');', start)
    assert end != -1

    payload = body[start:end]
    try:
        results = _parse_baidu_json(json.loads(_get_valid_json(payload), strict=False))
    except json.JSONDecodeError:
        if debug:
            with open(f'{datetime.now().strftime("%Y.%m.%d-%H.%M.%S-%f")}.html', 'wb') as f:
                f.write(request.response.body)
        raise
    downloaded_urls.update(_merge_results_with_downloader_contexts(results, contexts))


def _parse_XHR(request, contexts: Dict[str, _ImageContext], downloaded_urls: set):
    """Parse a Baidu ``acjson`` XHR response and merge its result entries into
    *contexts*; completed URLs go into *downloaded_urls*.

    On a JSON parse failure the raw body is dumped to a .json file when the
    module-level ``debug`` flag is set, and the error is re-raised.
    """
    if not _check_has_valid_response(request):
        return
    body = request.response.body
    try:
        results = _parse_baidu_json(json.loads(_get_valid_json(body), strict=False))
    except json.JSONDecodeError:
        if debug:
            with open(f'{datetime.now().strftime("%Y.%m.%d-%H.%M.%S-%f")}.json', 'wb') as f:
                f.write(request.response.body)
        raise
    downloaded_urls.update(_merge_results_with_downloader_contexts(results, contexts))


def _parse_image_request(request, contexts: Dict[str, _ImageContext], downloaded_urls: set):
    """Record the bytes of a fetched image; mark the URL as downloaded once
    its metadata is also known."""
    if not _check_has_valid_response_and_image_type(request):
        return
    is_complete = _merge_image_raw_with_downloader_contexts(request.url, request.response.body, contexts)
    if is_complete:
        downloaded_urls.add(request.url)


def _parse_requests(requests, contexts: Dict[str, _ImageContext]):
    """Dispatch each captured request to the matching parser.

    Returns the set of thumbnail URLs that became fully downloaded while
    processing this batch.
    """
    downloaded_urls = set()
    for request in requests:
        url = request.url
        if url.startswith('https://image.baidu.com/search/index?tn=baiduimage'):
            _parse_web_page(request, contexts, downloaded_urls)
        elif url.startswith('https://image.baidu.com/search/acjson?tn=resultjson_com'):
            _parse_XHR(request, contexts, downloaded_urls)
        elif 'it/u=' in url:
            _parse_image_request(request, contexts, downloaded_urls)
    return downloaded_urls


def _download_loop(driver, io_operator: DownloaderIOOps, num_downloaded_images, target_number: int,
                   downloader_contexts: dict, rng: np.random.Generator, disp_prefix: str,
                   new_requests_launch_interval, retry_times_on_empty_response):
    """Poll the browser's captured traffic and save images until
    *target_number* is reached or the page stops yielding new results.

    ``num_downloaded_images`` is a 0-d numpy array that
    ``_save_downloaded_images`` increments in place.  Returns True when the
    target is reached; once ``retry_times_on_empty_response`` consecutive
    empty polls occur, returns whether this run downloaded anything at all.
    """
    tried_times = 0
    wait_for_request_done_sleep_time = 0.1
    # Snapshot of the count at entry, used to decide whether this run made progress.
    last_run_downloaded = num_downloaded_images.item()

    page_height = 0

    while True:
        if tried_times > retry_times_on_empty_response:
            # Retries exhausted: report whether any new image arrived this run.
            return num_downloaded_images - last_run_downloaded > 0

        downloaded_images = _parse_requests(driver.requests, downloader_contexts)
        # Clear selenium-wire's captured-request buffer so the next poll only
        # sees fresh traffic.
        del driver.requests

        if len(downloaded_images) == 0:
            tried_times += 1
        else:
            tried_times = 0

        _save_downloaded_images(io_operator, downloaded_images, downloader_contexts, num_downloaded_images,
                                target_number, disp_prefix)

        if num_downloaded_images < target_number:
            if len(downloaded_images) == 0 and tried_times > 2:
                # Nothing new for a few polls: scroll to the bottom to trigger
                # lazy loading, then wait a jittered interval for requests.
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(rng.uniform(new_requests_launch_interval * 0.8, new_requests_launch_interval * 1.2))
                new_page_height = driver.execute_script("return document.body.scrollHeight;")
                if new_page_height > page_height:
                    # The page actually grew, so more results may be coming:
                    # reset the empty-response counter.
                    page_height = new_page_height
                    tried_times = 0
            else:
                time.sleep(wait_for_request_done_sleep_time)
        else:
            return True


def download_image_search_result_from_baidu(category_identity: str, search_name: str, workspace_dir: str,
                                            db_config: dict, target_number: int,
                                            proxy_address: str, headless: bool,
                                            enable_io_perf_stat: bool = False,
                                            enable_rich_log: bool = False,
                                            new_requests_launch_interval: float = 0.6,
                                            retry_times_on_empty_response: int = 30,
                                            file_lock_expired_time: int = 1800  # half hour
                                            ):
    """Download up to *target_number* Baidu image-search results for *search_name*.

    Returns a ``(DownloaderState, image_count)`` tuple:
      * ``Skipped``    — the per-category file lock could not be acquired.
      * ``Done``       — at least *target_number* images are stored.
      * ``Unfinished`` — progress was made but the target was not reached.
      * ``Fail``       — a run made no progress, or every browser restart
                         attempt raised.

    When the module-level ``debug`` flag is set, exceptions are re-raised
    instead of consuming a restart attempt.
    """
    perf_stat = FunctionCallPerfStat(True) if enable_io_perf_stat else nullcontext()
    with perf_stat:
        io_operator = DownloaderIOOps(category_identity, workspace_dir, db_config, file_lock_expired_time)
        if not io_operator.try_get_lock():
            return DownloaderState.Skipped, 0

        with io_operator:
            rng = np.random.Generator(np.random.PCG64())
            if enable_rich_log:
                disp_prefix = f'{search_name}({category_identity})'
            else:
                disp_prefix = None

            num_downloaded_images = io_operator.count()

            if num_downloaded_images >= target_number:
                return DownloaderState.Done, num_downloaded_images

            # 0-d numpy array so _download_loop/_save_downloaded_images can
            # increment the count in place.
            num_downloaded_images = np.asarray(num_downloaded_images)
            fault_tolerance = 2
            tried_times = 0

            downloader_contexts = {}

            while tried_times < fault_tolerance:
                try:
                    driver = get_default_web_driver(proxy_address, headless)
                    with driver:
                        driver.get(
                            f'https://image.baidu.com/search/index?tn=baiduimage&word={urllib.parse.quote(search_name)}')
                        success_flag = _download_loop(driver, io_operator, num_downloaded_images, target_number,
                                                      downloader_contexts, rng, disp_prefix,
                                                      new_requests_launch_interval, retry_times_on_empty_response)

                        if success_flag:
                            if num_downloaded_images < target_number:
                                return DownloaderState.Unfinished, num_downloaded_images.item()
                            else:
                                return DownloaderState.Done, num_downloaded_images.item()
                        else:
                            return DownloaderState.Fail, num_downloaded_images.item()
                except Exception:
                    print(traceback.format_exc())
                    tried_times += 1

                    if debug:
                        # Bare raise keeps the original traceback intact.
                        raise

            # BUG FIX: previously the loop ``break``-ed after exhausting the
            # restart attempts and the function implicitly returned None,
            # while every other path returns a (state, count) tuple.
            return DownloaderState.Fail, num_downloaded_images.item()
