import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from impl.db.database import DatabaseConnection
from impl.db.model.record import BingCrawlerRecordDAO
import os
from datetime import datetime
import csv
from tqdm import tqdm


def db_dump_records(db_config: dict, save_folder: str, with_id_offset: bool = True) -> None:
    """Dump crawler records from the database into a timestamped CSV in *save_folder*.

    Args:
        db_config: Connection settings forwarded to ``DatabaseConnection``.
        save_folder: Directory that receives the CSV file and (optionally) the
            ``dumped_max_id.txt`` tracking file. Created if it does not exist.
        with_id_offset: When True, records with id <= the value stored in
            ``dumped_max_id.txt`` from a previous run are skipped, and the file
            is updated with the last id seen after a successful dump.
    """
    # Robustness: make sure the output directory exists before any open() call.
    os.makedirs(save_folder, exist_ok=True)

    id_file = os.path.join(save_folder, 'dumped_max_id.txt')
    dumped_max_id = -1
    if with_id_offset and os.path.exists(id_file):
        with open(id_file) as f:
            content = f.read().strip()
        # Tolerate an empty or corrupt tracking file instead of crashing with
        # ValueError on int('') — fall back to dumping everything.
        try:
            dumped_max_id = int(content)
        except ValueError:
            dumped_max_id = -1

    db = DatabaseConnection(db_config)
    csv_file_path = os.path.join(save_folder, f'{datetime.now().strftime("%Y.%m.%d-%H.%M.%S-%f")}.csv')
    last_id = None
    with open(csv_file_path, 'w', newline='', encoding='utf-8') as csv_fid, db:
        record_dao = BingCrawlerRecordDAO(db)
        csv_writer = csv.writer(csv_fid, delimiter=',')
        # The record id is consumed for tracking only; it is NOT written to the CSV.
        for last_id, category_identity, image_id, search_word, file_name, url, source_url, source_image_url, caption in tqdm(record_dao.get_iterator_with_id_limits(dumped_max_id + 1)):
            csv_writer.writerow((category_identity, image_id, search_word, file_name, url, source_url, source_image_url, caption))

    # NOTE(review): assumes the DAO iterator yields rows in ascending id order,
    # so the last id seen is the maximum — confirm against the DAO implementation.
    if with_id_offset and last_id is not None:
        with open(id_file, 'w') as id_fid:
            id_fid.write(str(last_id))


import argparse


if __name__ == '__main__':
    # CLI entry point: dump DB records to CSV, then count images per category.
    arg_parser = argparse.ArgumentParser('Dump meta data from mysql to CSV files')
    arg_parser.add_argument('save_folder', type=str, help='Folder path to store CSV')
    arg_parser.add_argument('--track-dumped-id', action='store_true', help='Skip dumped records')
    cli_args = arg_parser.parse_args()

    # Imported here (as in the original) so the module stays importable
    # without the db_utils package on the path.
    from db_utils._get_db_config import get_db_config
    from db_utils.count_images import db_count_images

    db_dump_records(get_db_config(), cli_args.save_folder, cli_args.track_dumped_id)
    db_count_images(get_db_config(), cli_args.save_folder)
