import argparse
import pandas as pd
import os
import json
import sys
import jsonlines
sys.path.append('/share/project/duli/content_relation_ana/utils')
#from utils import *
import re
from tqdm import tqdm
from collections import Counter
import pdb

def clean_tags(tag_string):
    """Split a raw "<tag1><tag2>..." string into a list of cleaned tag strings.

    Handles several separator variants left by the tagging model ("><",
    "> <", stray '<' or '>'), lowercases everything, and strips punctuation
    noise. Returns the list of cleaned tags (may contain empty strings if
    the input was empty). Raises AttributeError if *tag_string* is not a str.
    """
    # Drop the outermost angle brackets, then split between adjacent tags.
    tag_ls = tag_string.strip('<>').split('><')
    # Further split on separator variants the model sometimes emits.
    tag_ls = [j.strip().lower() for t in tag_ls for j in t.split('> <')]
    tag_ls = [j.strip().lower() for t in tag_ls for j in t.split('>')]
    tag_ls = [j.strip().lower() for t in tag_ls for j in t.split('<')]
    # Remove noise characters. Note the order: all original spaces are removed
    # BEFORE '-' is turned into ' ', so only hyphen-derived spaces survive.
    tag_ls = [t.replace(' ', '').replace('＜', '').replace('＞', '')
               .replace('-', ' ').replace('/', '').replace('&', '')
               .replace('{', '').replace('}', '').replace("\'", '').lower()
              for t in tag_ls]
    # Drop commas (ASCII and full-width), turn '.' into a space, trim.
    # The original had a third pass repeating replace(',','').strip() — it was
    # a provable no-op after this line and has been removed.
    tag_ls = [t.replace(',', '').replace('，', '').replace('.', ' ').strip() for t in tag_ls]

    return tag_ls


def normalize_tag(tags, tag_type):
    """Deduplicate *tags* and map each through the global conversion tables.

    Relies on the module-level dicts ``convert_tags`` and
    ``convert_tags_high_freq`` being loaded (done in ``__main__``).
    *tag_type* is kept for interface compatibility but is currently unused
    (the per-type table lookup it once selected is commented out).

    Returns a deduplicated list of normalized tags (order unspecified).
    """
    normalized_tags = []
    for tag in set(tags):
        # O(1) dict lookups replace the original linear scans over the key
        # list. The scans could also re-match the *replacement* value against
        # later keys (accidental chained remapping dependent on key order);
        # each table is now applied exactly once per tag.
        tag = convert_tags.get(tag, tag)
        tag = convert_tags_high_freq.get(tag, tag)
        normalized_tags.append(tag)
    # Conversion may map distinct inputs to the same tag; dedupe again.
    return list(set(normalized_tags))


def match_cate(tags, cate_match_dict):
    """Map each tag in *tags* to its category via *cate_match_dict*.

    Tags with no entry in the mapping are silently skipped (the original
    intent of the bare ``except: pass``, which also swallowed unrelated
    errors). Returns the deduplicated list of categories (order unspecified).
    """
    matched_cate_ls = [cate_match_dict[tag] for tag in tags if tag in cate_match_dict]
    return list(set(matched_cate_ls))

def lan_convert_dict_reformat(convert_dict_ls):
    """Build a zh -> en lookup from records with 'keywords_zh'/'keywords_en'.

    Each English value is stripped of leading/trailing '.' and lowercased.
    Later records with a duplicate Chinese key overwrite earlier ones,
    matching the original loop's last-wins behavior. The original also
    re-read the just-written entry to post-process it; a single dict
    comprehension does both steps at once.
    """
    return {
        rec['keywords_zh']: rec['keywords_en'].strip('.').lower()
        for rec in convert_dict_ls
    }

def parse_args():
    """Parse command-line options for the tag cleaning / normalization run."""
    ap = argparse.ArgumentParser()
    # Input jsonl file: one JSON sample per line, each with a 'label' dict.
    ap.add_argument("--data_path", type=str, default='')
    # Directory holding the normalization pattern / conversion-table JSONs.
    ap.add_argument("--patterns_path", type=str, default='normalize_patterns')
    # Which label field to clean (e.g. 'ability').
    ap.add_argument("--tag_type", type=str, default='ability')
    # Where the cleaned jsonl outputs are written.
    ap.add_argument(
        "--output_dir",
        type=str,
        default='./tag_clean_and_normalization/normalized/ability',
        required=False,
    )
    # NOTE(review): this flag is not read anywhere in this script — confirm.
    ap.add_argument("--normalize", action='store_true', required=False)
    return ap.parse_args()

if __name__ == "__main__":
    args = parse_args()
    #wkdir = '/share/project/duli/content_relation_ana'
    #os.chdir(wkdir)
    tag_type = args.tag_type
    # NOTE(review): `if True:` reads like a disabled toggle left behind —
    # confirm whether this whole stage was meant to be switchable.
    if True:
        # Phase 1: load raw tagged samples (one JSON object per line) and the
        # normalization tables. `json.load(open(...))` leaves file handles
        # unclosed; a `with` block would be safer.
        with open(args.data_path, 'r') as f:
            dat_ls = [json.loads(line) for line in tqdm(f.readlines())]
        meaningless_tags = json.load(open(os.path.join(args.patterns_path, 'meaningless_tags.json')))
        convert_tags = json.load(open(os.path.join(args.patterns_path, 'convert_tags.json')))
        convert_tags_high_freq = json.load(open(os.path.join(args.patterns_path, 'convert_tags_high_freq.json')))

        # match 2nd tags for 3rd tags
        # tag -> category lookup, merged from high-frequency and long-tail tables.
        cate_match_dict = json.load(open(os.path.join(args.patterns_path, f'high_freq_tag_{args.tag_type}_cate_convert_dict.json')))
        cate_match_dict.update(json.load(open(os.path.join(args.patterns_path, f'long_tail_tag_{args.tag_type}_cate_convert_dict.json'))))
        print(meaningless_tags)
        absent_tag_ls = []   # samples left with no tag or no category
        cleaned_ls = []      # samples keeping at least one tag AND one category
        tag_ls = []          # flat list of surviving tags (collected below)
        num_tag_absent = 0
        num_cate_absent = 0
        # Phase 2: clean + normalize each sample's tags, attach categories.
        for ith, dat in tqdm(enumerate(dat_ls)):
            try:
                dat['label'][tag_type] = clean_tags(dat['label'][tag_type])
            except Exception as e:
                # Samples without a parsable tag field are silently skipped.
                #cleaned_ls.append(dat)
                continue
            dat['label'][tag_type] = normalize_tag(dat['label'][tag_type], tag_type)
            dat[f'cate_{args.tag_type}'] = match_cate(dat['label'][tag_type], cate_match_dict)
            dat['label'][tag_type] = [i.strip() for i in dat['label'][tag_type]]
            dat[f'cate_{args.tag_type}'] = [i.strip() for i in dat[f'cate_{args.tag_type}']]
            
            #pdb.set_trace()
            # Route each sample: keep, or park in absent_tag_ls with a counter.
            if len(dat['label'][tag_type]) > 0 and len(dat[f'cate_{args.tag_type}']) > 0:
                cleaned_ls.append(dat)
            elif len(dat['label'][tag_type]) == 0:
                absent_tag_ls.append(dat)
                num_tag_absent += 1
            elif len(dat[f'cate_{args.tag_type}']) == 0:
                absent_tag_ls.append(dat)
                num_cate_absent += 1
            # NOTE(review): tags are collected even for samples routed to
            # absent_tag_ls — confirm that is intended for the stats below.
            tag_ls.extend(dat['label'][tag_type])
        print(len(cleaned_ls))

        print('length of instances without tag or category_tag:', num_tag_absent, num_cate_absent)
        # save_jsonl(absent_tag_ls, os.path.join(args.output_dir, f'absent_{args.tag_type}.jsonl'))
        #save_jsonl(cleaned_ls, os.path.join(args.output_dir, f'cleaned_{args.tag_type}.jsonl'))
        # Persist the kept samples as jsonl.
        with jsonlines.open(os.path.join(args.output_dir, f'cleaned_{args.tag_type}.jsonl'),"w") as wf:
           for line in cleaned_ls:
               wf.write(line) 
        # pdb.set_trace()

    '''
    Combine the re-tag dataset, including Baize and belle_multiturn_chat
    '''
    # Phase 3: re-read the file just written and prune rare tags.
    sample_ls = []
    with jsonlines.open(os.path.join(args.output_dir, f'cleaned_{args.tag_type}.jsonl')) as f:
        for line in f:
            sample_ls.append(line)

    tag_ls = []

    for sample in sample_ls:
        tag_ls.extend(sample['label'][tag_type])
    tag_counter = Counter(tag_ls)
    # Tags occurring fewer than ~3 times per 100k samples are dropped.
    del_tags = set([k for (k,v) in tag_counter.items() if v < int((len(sample_ls) * 3) / 1e5)])

    sample_ls_n = []
    for ith, sample in enumerate(sample_ls):
        sample['label'][tag_type] = list(set(sample['label'][tag_type]).difference(del_tags))
        if len(sample['label'][tag_type]) > 0:
            sample_ls_n.append(sample)
        # else:
        #     absent_ls.append(sample)

    # Phase 4: translate zh tags/categories to English via lookup tables.
    f = open('normalize_patterns/tag_list_en.json', 'r')
    tag_conv_dict = lan_convert_dict_reformat(json.load(f))
    f.close()

    f = open('normalize_patterns/cate_list_en.json', 'r')
    cate_conv_dict = lan_convert_dict_reformat(json.load(f))
    f.close()
    
    # NOTE(review): this section hard-codes the 'ability' tag type and reads
    # sample['cate_ability'], but the categories above were stored under
    # f'cate_{args.tag_type}' — any --tag_type other than 'ability' will
    # KeyError here. Confirm whether this script is 'ability'-only.
    for sample in sample_ls_n:
        sample['label']['ability_en'] = []
        sample['cate_ability_en'] = []
        del_ls_tmp = []
        for label in sample['label']['ability']:
            try:
                sample['label']['ability_en'].append(tag_conv_dict[label])
            except:
                # Tags without an English translation are removed below.
                del_ls_tmp.append(label)
        for cate in sample['cate_ability']:
            try:
                sample['cate_ability_en'].append(cate_conv_dict[cate])
            except:
                pass
        sample['label']['ability'] = list(set(sample['label']['ability']).difference(del_ls_tmp))
        
        # Re-home everything under 'label' with explicit _zh/_en suffixes,
        # then delete the old keys.
        sample['label']['ability_zh'] = sample['label']['ability']
        sample['label']['cate_ability_zh'] = sample['cate_ability']
        sample['label']['cate_ability_en'] = sample['cate_ability_en']

        del sample['label']['ability']
        del sample['cate_ability']
        del sample['cate_ability_en']
        
    # NOTE(review): duplicate of the top-level `import jsonlines` — harmless.
    import jsonlines
    with jsonlines.open(os.path.join(args.output_dir, f'infinity-instruct-7M-eng.jsonl'),"w") as wf:
        for line in sample_ls_n:
            wf.write(line)