import asyncio
import configparser
from concurrent.futures import as_completed, ThreadPoolExecutor
import datetime
import math
import mmap
import threading

import aiohttp

from BrowserCookiesGetter import BrowserCookiesGetter
import DatabaseOperation
from PixivAPI import PixivAPI
import PackagedInfo
import PublicConstant
from Settings import Settings

# Anonymous 8-byte shared-memory block used as a tiny control channel
# (read_from_shared/write_to_shared below pass short byte strings such as
# b'stop\n' through it). NOTE(review): a string third positional argument is
# the Windows-only `tagname` parameter of mmap — on POSIX that position is an
# int flags value, so this module presumably targets Windows; confirm.
shared_memory = mmap.mmap(-1, 8, 'thread_switch')
# Binary semaphore guarding every seek/read/write of shared_memory.
sema = threading.Semaphore(1)
# Released once per page whose details have been stored (see the
# download_details_* functions); starts at 0 so waiters block until then.
has_new_page = threading.Semaphore(0)


def print_progress_bar(current_progress: int, total_progress: int, length: int = 16) -> None:
    """Redraw a single-line text progress bar on stdout.

    Uses '\\r' so successive calls overwrite the same terminal line.

    :param current_progress: number of completed items
    :param total_progress: total number of items; 0 is treated as "nothing
        to do" and rendered as a full bar instead of dividing by zero
    :param length: total character width of the bar
    """
    if total_progress <= 0:
        # Nothing to measure against — show 100% rather than crash.
        current_progress = total_progress = 1
    progress_percentage: float = 100 * current_progress / total_progress
    # Clamp so the bar never overflows `length` if current > total.
    length_of_arrow: int = min(length, (length * current_progress) // total_progress)
    arrows = '>' * length_of_arrow
    dashes = '-' * (length - length_of_arrow)
    print(f'\r| {arrows}{dashes} |, {progress_percentage:.2f}% completed',
          end='', flush=True)


def download_details_of_one_page_without_thumbnails(_page_info: PackagedInfo.PackagedPageInfo, api: PixivAPI):
    """Fetch the detail record of every artwork on one result page using a
    thread pool, store the records in the database under the page's search
    keyword, and release ``has_new_page`` to signal that a page is done.

    Progress is rendered with :func:`print_progress_bar` as futures finish.
    """
    details = []
    artwork_count = len(_page_info.artworks)
    with ThreadPoolExecutor(max_workers=6) as executor:
        # One detail-fetch task per artwork on the page.
        detail_futures = [
            executor.submit(api.get_pic_info, artwork.id, artwork.thumbnail_url)
            for artwork in _page_info.artworks
        ]
        print(
            f'Downloading details of page {_page_info.page_index} of {_page_info.search_keyword} ')
        # Bad design, see details in PixivAPI.get_pic_info_and_thumbnail
        for finished_count, done in enumerate(as_completed(detail_futures), start=1):
            details.append(done.result())
            print_progress_bar(finished_count, artwork_count)
    DatabaseOperation.insert_data_by_keyword(_page_info.search_keyword, details)
    has_new_page.release()
    print()  # New line


async def download_details_of_one_page_without_thumbnails_async(
        _page_info: PackagedInfo.PackagedPageInfo, api: PixivAPI):
    """Async variant of :func:`download_details_of_one_page_without_thumbnails`.

    Fetches every artwork's detail record on one result page over a shared
    aiohttp session, stores the records in the database under the page's
    search keyword, and releases ``has_new_page`` to signal completion.

    Concurrency is bounded by a semaphore of
    ``PublicConstant.parse_page_parallel_num`` simultaneous detail requests —
    the higher, the faster.
    """
    pic_info_list: list = []
    counter: int = 0
    total_artwork_of_this_page = len(_page_info.artworks)
    max_worker = asyncio.Semaphore(PublicConstant.parse_page_parallel_num)
    # ssl=False: certificate verification is disabled for these requests —
    # presumably to work through the configured proxy; confirm before shipping.
    async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session:
        print(
            f'Downloading details of page {_page_info.page_index} of {_page_info.search_keyword} ')
        for future in asyncio.as_completed([
            api.get_pic_info_async(session, each.id, each.thumbnail_url, max_worker) for each in _page_info.artworks
        ]):
            pic_info_list.append(await future)
            counter += 1
            print_progress_bar(counter, total_artwork_of_this_page)
    DatabaseOperation.insert_data_by_keyword(
        _page_info.search_keyword, pic_info_list)
    has_new_page.release()
    print()  # New line


def parse_one_page_and_get_search_info(
        keyword: str, start_from: int,
        ecd: datetime.datetime | None, api: PixivAPI) -> PackagedInfo.PackagedSearchInfo:
    """Process one search-result page: fetch its listing, persist the resume
    marker, download every artwork's details, and summarize the search.

    :param keyword: search keyword
    :param start_from: 1-based page index to fetch
    :param ecd: only include artworks created since this date, or None
    :param api: authenticated PixivAPI instance
    :return: totals and this page's artwork count / latest creation date
    """
    page_info = api.get_page_info(keyword, start_from, ecd)

    total_artwork_num = page_info.total
    # A search result page holds at most 60 artworks.
    total_page_num = math.ceil(total_artwork_num / 60)

    print(f'Getting page {start_from} / {total_page_num} of {keyword}', end='')
    if ecd:
        print(f' since {ecd.year}-{ecd.month:02d}-{ecd.day:02d}', end='')
    print()

    # Record the newest creation date seen for this keyword so a later run
    # can resume from it (read back in CentralController.search).
    conf = configparser.ConfigParser()
    conf.read(PublicConstant.search_history_path, encoding='utf-8')
    conf[PublicConstant.search_history_section_name][keyword] = str(
        page_info.latest_create_date
    )
    with open(PublicConstant.search_history_path, 'w', encoding='utf-8') as f:
        conf.write(f)

    asyncio.run(
        download_details_of_one_page_without_thumbnails_async(page_info, api)
    )
    return PackagedInfo.PackagedSearchInfo(
        total_artwork_num, total_page_num, len(
            page_info.artworks), page_info.latest_create_date
    )


class CentralController:
    """Top-level driver: holds settings and browser cookies, and runs
    keyword searches page by page until exhausted or told to stop."""

    def __init__(self):
        self.settings = Settings()
        # Cookies are pulled straight from the local browser profile.
        self.browser_cookies = BrowserCookiesGetter(
        ).get_cookies_automatically_from_browser()

    def search(self, _search_keyword: str, end_date: datetime.datetime | None = None, start_from: int = 1):
        """Search Pixiv for ``_search_keyword`` and download details of every
        result page, resuming from the date stored in the search-history
        config when present (otherwise from ``end_date``).

        Stops when a page comes back empty, when the last page is reached,
        or when b'stop\\n' appears in the shared-memory channel.
        """
        def clear_search_history():
            # Drop this keyword's resume marker once its search completes.
            _conf = configparser.ConfigParser()
            _conf.read(PublicConstant.search_history_path, encoding='utf-8')
            _conf[PublicConstant.search_history_section_name].pop(
                _search_keyword
            )
            with open(PublicConstant.search_history_path, 'w', encoding='utf-8') as f:
                _conf.write(f)

        pixiv_api = PixivAPI(
            self.settings.proxies,
            self.browser_cookies
        )
        DatabaseOperation.create_table_by_keyword(_search_keyword)

        conf = configparser.ConfigParser()
        conf.read(PublicConstant.search_history_path, encoding='utf-8')
        # Read the saved resume date once; fall back to the caller's end_date.
        saved_date = conf.get(
            PublicConstant.search_history_section_name, _search_keyword,
            fallback=None
        )
        ecd = datetime.datetime.fromisoformat(saved_date) if saved_date else end_date
        while True:
            search_info = parse_one_page_and_get_search_info(
                _search_keyword, start_from, ecd, pixiv_api
            )
            print(search_info)
            # Empty page, or a single-page result set: nothing more to fetch.
            if search_info.artwork_in_this_page == 0 or (search_info.total_page_num == start_from and start_from == 1):
                clear_search_history()
                return

            page_index: int = start_from + 1
            while True:
                if b'stop\n' == read_from_shared():
                    return

                search_info = parse_one_page_and_get_search_info(
                    _search_keyword, page_index, ecd, pixiv_api
                )
                # Pixiv search results are capped at page 1000 — TODO confirm.
                if page_index == 1000 or search_info.total_page_num == page_index:
                    # Restart the outer loop, resuming from the newest
                    # creation date seen so far.
                    ecd = search_info.latest_create_date
                    break
                page_index += 1


def write_to_shared(content: bytes) -> None:
    """Write ``content`` at the start of the shared-memory channel.

    The semaphore is held as a context manager so it is released even if
    the write raises (the bare acquire/release pair could leak the lock).
    """
    with sema:
        shared_memory.seek(0)
        shared_memory.write(content)


def read_from_shared() -> bytes:
    """Read one line from the start of the shared-memory channel.

    The semaphore is held as a context manager so it is released even if
    the read raises (the bare acquire/release pair could leak the lock).

    :return: bytes up to and including the first newline, e.g. b'stop\\n'
    """
    with sema:
        shared_memory.seek(0)
        return shared_memory.readline()
