"""
Read from keywords.txt
Remove Low-frequency words
"""
import argparse
import re
from collections import Counter
from pathlib import Path

import pandas as pd
from tqdm import tqdm


def calculate_count(keywords_path: str, save_path: str = './test_keywords.csv') -> pd.DataFrame:
    """
    Count keyword frequencies from a keywords file.

    Each line of the keywords file is tab-separated; the keywords start at
    the third field (the first two fields are skipped).

    If ``save_path`` already exists, the cached table is loaded from it
    instead of being recomputed.

    :param keywords_path: path to the keywords.txt file to read
    :param save_path: CSV file used to cache the (word, count) table
    :return: DataFrame with columns 'word' and 'count', sorted by
             descending count, then alphabetically by word
    """
    if Path(save_path).exists():
        print(f"{save_path} exists")
        return pd.read_csv(save_path, sep=',')

    with open(keywords_path) as f:
        lines = f.readlines()

    words = []
    for line in tqdm(lines, desc='read lines'):
        # fields 0 and 1 are metadata; keywords start at field 2
        words.extend(line.strip().split('\t')[2:])

    # Counter is a single O(n) pass, unlike calling words.count(w) once per
    # unique word, which is O(n^2) on large keyword files.
    counts = Counter(words)
    ordered = sorted(counts.items(), key=lambda t: (-t[1], t[0]))

    df = pd.DataFrame(ordered, columns=['word', 'count'])
    df.to_csv(save_path, sep=',', index=False)
    return df


def collect_words(df: pd.DataFrame, count: int = 1):
    """
    Return the words whose frequency is at most ``count``.

    :param df: DataFrame with 'word' and 'count' columns
    :param count: inclusive upper bound on a word's frequency
    :return: list of low-frequency words, in DataFrame order
    """
    low_freq = df.loc[df['count'] <= count]
    return low_freq['word'].tolist()


def sub(text, regex):
    """
    Delete every match of ``regex`` from ``text``.

    :param text: string to clean
    :param regex: pattern whose matches are removed
    :return: ``text`` with all matches stripped out
    """
    return re.sub(regex, '', text)


def filter_words(df_data: pd.DataFrame, words: list):
    """
    Remove every whole-word occurrence of ``words`` from the 'caption'
    column of ``df_data``, in place.

    :param df_data: DataFrame with a 'caption' column of strings
    :param words: words to strip from each caption (non-string entries
                  are skipped)
    :return: the same DataFrame, with captions rewritten
    """
    # re.escape guards against keywords containing regex metacharacters
    # (e.g. '(', '+', '.'), which previously corrupted the alternation.
    patterns = [r'\b' + re.escape(word) + r'\b' for word in words if isinstance(word, str)]
    if not patterns:
        # nothing to remove — avoid compiling an empty alternation
        return df_data
    # compile once; the same pattern is reused for every row
    regex = re.compile('|'.join(patterns))

    for index, row in tqdm(df_data.iterrows(), total=len(df_data), desc='filter words'):
        df_data.loc[index, 'caption'] = regex.sub('', row['caption'])
    return df_data


def main(dataset='test', count=1, root='/remote-home/share/medical/public/ROCO'):
    """
    Filter low-frequency keywords out of a dataset's captions.

    Reads ``processed_<dataset>.csv`` and ``keywords.txt`` from the
    dataset's radiology directory, removes every word whose frequency is
    <= ``count`` from the captions, and writes
    ``filtered<count>_<dataset>.csv`` next to the inputs.

    :param dataset: dataset split name ('test' | 'valid' | 'train')
    :param count: inclusive frequency threshold for word removal
    :param root: base directory of the ROCO dataset layout (parameterized
                 so the script is usable outside the original machine)
    """
    dir_path = Path(root) / dataset / "radiology"
    txt_path = dir_path / "keywords.txt"
    csv_path = dir_path / f"processed_{dataset}.csv"
    data = pd.read_csv(csv_path, sep=',')
    df_count = calculate_count(keywords_path=txt_path, save_path=f'./{dataset}_keywords.csv')
    words = collect_words(df=df_count, count=count)
    processed_data = filter_words(df_data=data, words=words)
    processed_data.to_csv(dir_path / f'filtered{count}_{dataset}.csv', sep=',', index=False)


if __name__ == '__main__':
    print('Filter out low-frequency words')
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset",
        type=str,
        # default matches main(); a None default would override main()'s
        # own default and build a path containing the literal string "None"
        default="test",
        help="test | valid | train",
    )
    parser.add_argument(
        "--thresh-count",
        type=int,
        # default matches main() for the same reason as --dataset
        default=1,
        help="word with less count than thresh_count would be filtered out",
    )
    args = parser.parse_args()
    main(dataset=args.dataset, count=args.thresh_count)
