"""
Read from keywords.txt
Calculate word count & frequency

|word|count|frequency|
......................
"""
import argparse
import html
import re
from collections import Counter
from pathlib import Path

import ftfy
import pandas as pd
from tqdm import tqdm


def basic_clean(text):
    """Fix text encoding artifacts and HTML escaping, then trim.

    ftfy repairs mojibake; html.unescape is applied twice to undo
    potential double HTML-escaping of entities in the source captions.
    """
    fixed = ftfy.fix_text(text)
    unescaped = html.unescape(html.unescape(fixed))
    return unescaped.strip()


def whitespace_clean(text):
    """Collapse every run of whitespace into a single space and trim the ends."""
    return re.sub(r'\s+', ' ', text).strip()


def normalize(data: list) -> list:
    """Min-max scale *data* into [0, 1].

    Edge cases: an empty list returns an empty list; when every value is
    identical (zero range) each element maps to 0.0 instead of raising
    ZeroDivisionError.
    """
    if not data:
        return []
    max_num, min_num = max(data), min(data)
    gap = max_num - min_num
    if gap == 0:
        # All values equal: any constant in [0, 1] is defensible; use 0.0.
        return [0.0 for _ in data]
    return [(x - min_num) / gap for x in data]


def calculate_count(dataset_path: str, save_path: str = './test_wordcount.csv') -> pd.DataFrame:
    """Build a word count / frequency table from the captions in *dataset_path*.

    If *save_path* already exists it is treated as a cache: it is loaded
    and returned unchanged. Otherwise each caption is cleaned
    (``basic_clean`` + ``whitespace_clean``), lower-cased and split on
    spaces, and a DataFrame with columns ``word`` / ``count`` /
    ``word_frequency`` / ``norm_frequency`` (min-max scaled frequency),
    sorted by descending count then alphabetically, is written to
    *save_path* and returned.
    """
    if Path(save_path).exists():
        print(f"{save_path} exists")
        return pd.read_csv(save_path, sep=',')

    dataset = pd.read_csv(dataset_path, sep=',')
    words = []
    for caption in dataset['caption'].tolist():
        caption = whitespace_clean(basic_clean(caption)).lower()
        # whitespace_clean already strips, so a plain split on ' ' suffices.
        words += caption.split(' ')

    total_wordnum = len(words)
    # Counter counts in one O(n) pass; the previous per-unique-word
    # words.count(w) loop was O(n * m) and dominated runtime.
    counts = Counter(words)
    wordcount = sorted(
        ((w, c, c / total_wordnum) for w, c in counts.items()),
        key=lambda t: (-t[1], t[0]),
    )

    pd_word = [t[0] for t in wordcount]
    pd_count = [t[1] for t in wordcount]
    pd_frequency = [t[2] for t in wordcount]
    df = pd.DataFrame({
        'word': pd_word,
        'count': pd_count,
        'word_frequency': pd_frequency,
        'norm_frequency': normalize(pd_frequency),
    })
    df.to_csv(save_path, sep=',', index=False)
    return df


if __name__ == '__main__':
    # CLI: choose which ROCO split to compute word statistics for.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset",
        type=str,
        default=None,
        help="test | valid | train",
    )
    split = parser.parse_args().dataset

    calculate_count(
        dataset_path=f"/remote-home/share/medical/public/ROCO/{split}/radiology/processed_{split}.csv",
        save_path=f"./{split}_wordcount.csv"
    )
