"""
Usage:
preprocessing.py -h | --help
preprocessing.py gb2312 [--od <output_folder>] [--seed <seed>]
preprocessing.py nankai [--od <output_folder>] [--seed <seed>]
preprocessing.py ecdict [--if <input_file>] [--of <output_file>] [--word_max_len=<num>] [--word_min_len=<num>] [--word_sample_size=<num>] [--seed=<seed>]
preprocessing.py generate [--if <input_file>] [--od <output_folder>] [--font_folder <font_folder>] \
                          [--mod <mod>] [--data_aug_size <size>] [--image_size <image_size>] [--font_size <font_size>] [--seed=<seed>]
preprocessing.py convert [--if <input_folder>]
preprocessing.py mvfr [--dir <file_folder>] [--seed <seed>]

sub cmd:
convert: convert the images in the input folder to 'L' (grayscale) mode

options:
-h --help                   #Show this document.
--if=<input_file>           #Specify the input file. The DEFAULT value is defined in main().
--of=<output_file>          #Specify the output file. The DEFAULT value is defined in main().
--od=<output_folder>        #Specify the output folder. The DEFAULT value is defined in main().
--seed=<seed>               #in `ecdict` and `generate`: Specify the random seed. [default: 42]

--word_max_len=<num>        #in `ecdict`: the max length of word in ecdict when filtering [default: 10]
--word_min_len=<num>        #in `ecdict`: the min length of word in ecdict when filtering [default: 9]
--word_sample_size=<num>    #Specify the number of words for sampling from unique_words of ecdict [default: 1000]

--font_folder=<font_folder>     #in `generate`: the directory of fonts. [default: ./data/fonts]
--mod=<mod>                 #in `generate`: the data mod. 'train' or 'val', or 'test'
--data_aug_size=<size>      #in `generate`: the number of data-augmented copies of font_image. [default: 5]
--image_size=<image_size>   #in `generate`: the size of generated image. [default: (200, 200)]
--font_size=<font_size>     #in `generate`: determine the font size in font_image. [default: 96]

--dir=<folder>              #in `mvfr`
"""

import sys
from pathlib import Path

from torch.utils.tensorboard.summary import image
from utils import get_charlist, get_gb2312_chars, get_ecdict_words, get_Nankai_chars, generator, convert_to_L
from docopt import docopt
import pandas as pd
import ast
import random
import shutil

def _cmd_gb2312(args):
    """Sample 800 train / 200 test characters from GB2312 and save them as CSVs.

    Always terminates the process via ``sys.exit(0)``.
    """
    seed = int(args.get('--seed'))
    random.seed(seed)
    print(f"Using seed: {seed}")
    output_folder = Path(args.get('--od') or './data/charlist/gb2312')
    train_chars_file = output_folder / 'train_chars.csv'
    test_chars_file = output_folder / 'test_chars.csv'
    if not output_folder.exists():
        output_folder.mkdir(parents=True, exist_ok=True)
        print(f"Create directory '{output_folder}'")

    gb2312_chars = get_gb2312_chars()
    train_chars = random.sample(gb2312_chars, 800)
    # Test characters are drawn from the remainder so train/test are disjoint.
    # NOTE(review): set difference order depends on hash randomization, so this
    # sample may differ across interpreter runs even with a fixed seed.
    _test_chars = list(set(gb2312_chars) - set(train_chars))
    test_chars = random.sample(_test_chars, 200)
    try:
        pd.Series(train_chars).to_csv(str(train_chars_file), header=['char'], index_label='index')
        pd.Series(test_chars).to_csv(str(test_chars_file), header=['char'], index_label='index')
        # BUG FIX: these messages previously said "Nankai Dataset" (copy-paste
        # from the nankai branch); this command samples from GB2312.
        print(f"Saved {len(train_chars)} train characters from GB2312 to {train_chars_file}")
        print(f"Saved {len(test_chars)} test characters from GB2312 to {test_chars_file}")
    except Exception:
        print(f"Exception raised in saving GB2312 characters to {output_folder}")
    finally:
        sys.exit(0)


def _cmd_nankai(args):
    """Save Nankai train/val character splits plus a 200-char GB2312 test split.

    Always terminates the process via ``sys.exit(0)``.
    """
    seed = int(args.get('--seed'))
    random.seed(seed)
    print(f"Using seed: {seed}")
    output_folder = Path(args.get('--od') or './data/charlist/nankai')
    train_chars_file = output_folder / 'train_chars.csv'
    val_chars_file = output_folder / 'val_chars.csv'
    test_chars_file = output_folder / 'test_chars.csv'
    if not output_folder.exists():
        output_folder.mkdir(parents=True, exist_ok=True)
        print(f"Create directory '{output_folder}'")

    train_chars, val_chars = get_Nankai_chars()
    # Test chars: 200 GB2312 characters not seen in the Nankai train/val sets.
    gb2312_chars = get_gb2312_chars()
    _test_chars = list(set(gb2312_chars) - set(train_chars + val_chars))
    test_chars = random.sample(_test_chars, 200)
    try:
        pd.Series(train_chars).to_csv(str(train_chars_file), header=['char'], index_label='index')
        pd.Series(val_chars).to_csv(str(val_chars_file), header=['char'], index_label='index')
        pd.Series(test_chars).to_csv(str(test_chars_file), header=['char'], index_label='index')
        print(f"Saved {len(train_chars)} train characters from Nankai Dataset to {train_chars_file}")
        print(f"Saved {len(val_chars)} val characters from Nankai Dataset to {val_chars_file}")
        print(f"Saved {len(test_chars)} test characters from Nankai Dataset to {test_chars_file}")
    except Exception:
        print(f"Exception raised in saving Nankai characters to {output_folder}")
    finally:
        sys.exit(0)


def _cmd_ecdict(args):
    """Sample length-filtered words from the ECDICT CSV and save them.

    The words themselves are reserved for a future task. Always terminates
    the process via ``sys.exit(0)``.
    """
    input_file = args.get('--if') or './data/ecdict/ecdict.csv'
    output_file = args.get('--of') or './data/charlist/ecdict_words.csv'
    word_min_len = int(args.get('--word_min_len'))
    word_max_len = int(args.get('--word_max_len'))
    word_sample_size = int(args.get('--word_sample_size'))
    seed = int(args.get('--seed'))
    words_sample = get_ecdict_words(input_file, word_min_len, word_max_len, word_sample_size, seed)
    try:
        pd.Series(words_sample).to_csv(str(output_file), header=['char'], index_label='index')
        print(f"Saved {word_sample_size} unique_words of ECDICT to {output_file}")
    except Exception:
        print(f"Exception raised in saving sampled_words to {output_file}")
    finally:
        sys.exit(0)


def _cmd_mvfr(args):
    """Move 800 random .jpg files per class from train/ to val/ (MVFR_Bangla).

    Unlike the other subcommands this one returns normally instead of exiting.
    """
    seed = int(args.get('--seed'))
    # BUG FIX: the seed was read but never applied, so the validation split
    # was not reproducible despite the --seed option.
    random.seed(seed)
    print(f"Using seed: {seed}")
    data_folder = Path(args.get('--dir') or './dataset/MVFR_Bangla/')
    assert data_folder.exists()
    train_folder = data_folder / 'train'
    val_folder = data_folder / 'val'
    num_val_imgs = 800
    if not train_folder.exists():
        print(f"train folder {train_folder} is not exist")
        sys.exit(0)
    if not val_folder.exists():
        val_folder.mkdir(parents=True, exist_ok=True)
        print(f"目标目录 {val_folder} 已创建。")
    for subdir in train_folder.iterdir():
        if not subdir.is_dir():
            continue
        # Collect all .jpg files of this class subdirectory.
        jpg_files = list(subdir.glob("*.jpg"))
        total_files = len(jpg_files)

        # Skip classes that do not have enough images to spare.
        if total_files < num_val_imgs:
            print(f"子目录 {subdir} 中的 .jpg 文件数量不足，仅包含 {total_files} 个文件。")
            continue

        # Randomly pick the validation images.
        selected_files = random.sample(jpg_files, num_val_imgs)

        # Mirror the class subdirectory under val/.
        target_subdir = val_folder / subdir.name
        target_subdir.mkdir(parents=True, exist_ok=True)

        # Move the selected files; a single failure does not abort the run.
        for file in selected_files:
            try:
                shutil.move(str(file), str(target_subdir / file.name))
                print(f"文件 {file.name} 已从 {subdir} 移动到 {target_subdir}。")
            except Exception as e:
                print(f"移动文件 {file.name} 时出错: {e}")

    print("所有文件移动完成。")


def _cmd_generate(args):
    """Render (augmented) font images for the word/char list in the input CSV."""
    csv_file = args.get('--if')
    mod = args.get('--mod')
    output_folder = args.get('--od') or './dataset/'
    font_folder = args.get('--font_folder')
    data_aug_size = int(args.get('--data_aug_size'))
    # --image_size arrives as a string like "(200, 200)"; parse it to a tuple.
    image_size = ast.literal_eval(args.get('--image_size'))
    font_size = int(args.get('--font_size'))
    seed = int(args.get('--seed'))
    generator(csv_file, mod, output_folder, font_folder, data_aug_size, image_size, font_size, seed)


def _cmd_convert(args):
    """Convert every image under the input folder to PIL 'L' (grayscale) mode."""
    convert_to_L(args.get('--if'))


def main(args):
    """Dispatch the parsed docopt options dict to the matching subcommand.

    Most handlers call ``sys.exit(0)`` themselves; ``mvfr`` returns normally,
    after which no other subcommand flag can be set, so control falls through.
    """
    if args['--help']:
        print(__doc__)
        sys.exit(0)

    if args['gb2312']:
        # gb2312 characters are used for generating the test dataset.
        _cmd_gb2312(args)

    if args['nankai']:
        _cmd_nankai(args)

    if args['ecdict']:
        _cmd_ecdict(args)

    if args['mvfr']:
        _cmd_mvfr(args)

    if args['generate']:
        _cmd_generate(args)

    if args['convert']:
        _cmd_convert(args)



if __name__ == '__main__':
    # Parse the command line against the module docstring (docopt usage spec)
    # and hand the resulting options dict to main().
    args = docopt(__doc__)
    # print(args)
    main(args)
