import os
import sys
sys.path.append('/share/duli/utils')
sys.path.append('/share/project/duli/content_relation_ana/utils')
#from utils import *
from draw import distrib_ana_tsne
from sklearn.manifold import TSNE
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt

from FlagEmbedding import FlagModel

from tqdm import tqdm
import random
import pdb
import re
import sys
# Path to the input JSONL file of tagged samples (first CLI argument).
input_file = sys.argv[1]

# Chinese -> English translation of category names; used to render
# English legend labels in the (currently disabled) plotting sections below.
translated_dict = {
    '沟通与社交媒体': 'Communication and Social Media',
    '语言学知识、多语言与多元文化理解': 'Linguistic Knowledge, Multilingual and Multicultural Understanding',
    '生活知识与技能': 'Life Knowledge and Skills',
    '法律知识': 'Legal Knowledge',
    '开放任务完成': 'Open Task Completion',
    '开放知识问答': 'Open Knowledge Q&A',
    '心理学知识': 'Psychology Knowledge',
    'STEM知识': 'STEM Knowledge',
    '信息处理与整合': 'Information Processing and Integration',
    '分析与研究': 'Analysis and Research',
    '任务生成': 'Task Generation',
    '项目与任务管理': 'Project and Task Management',
    '逻辑与推理': 'Logic and Reasoning',
    '数学能力': 'Ability of Mathematical',
    '创意与设计': 'Creativity and Design',
    '财务、金融与商业知识': 'Finance, Financial and Business Knowledge',
    '文学创作与艺术知识': 'Literary Creation and Art Knowledge',
    '编程与软件开发': 'Programming and Software Development',
    '教育与咨询': 'Education and Counseling',
    '政治、军事战略与安全知识': 'Political, Military Strategy and Security Knowledge',
    '医学、药学与健康知识': 'Medical, Pharmaceutical and Health Knowledge',
    '自然语言处理与理解': 'Natural Language Processing and Understanding',
    '问题解答与支持': 'Problem Solving and Support',
    '人文历史哲学与社会学知识': 'Humanities, History, Philosophy and Sociology Knowledge',
    '法律与安全知识': 'Legal and Security Knowledge',
    '数据科学与分析': 'Data Science and Analysis'
}

def get_unitag_instance(sample_ls, tag_type='ability'):
    """Group samples by the first tag of the requested type.

    Args:
        sample_ls: list of sample dicts; each must carry a 'label' dict
            with an 'ability_en' (tag_type='ability') or 'cate_ability_en'
            (tag_type='cate') list of tag strings.
        tag_type: which tag list to group by, 'ability' or 'cate'.

    Returns:
        dict mapping first tag -> list of samples whose tag list starts
        with that tag. Samples with an empty tag list are skipped.

    Raises:
        ValueError: if tag_type is not 'ability' or 'cate'.
        KeyError: if a sample lacks the expected label field.
    """
    # Resolve the label key up front; previously an unknown tag_type left
    # `target` unbound (NameError), and a missing label key dropped into
    # an interactive pdb session (leftover debugging code).
    if tag_type == 'ability':
        label_key = 'ability_en'
    elif tag_type == 'cate':
        label_key = 'cate_ability_en'
    else:
        raise ValueError(f"unknown tag_type: {tag_type!r}")

    tag_dict = {}
    for sample in sample_ls:
        target = sample['label'][label_key]
        if len(target) >= 1:
            # Group on the first (primary) tag only.
            tag_dict.setdefault(target[0], []).append(sample)
    return tag_dict

random.seed(2024)

# Load the tagged samples: one JSON object per line.
sample_ls = []
import jsonlines
with jsonlines.open(input_file) as f:
    for line in f:
        sample_ls.append(line)

# Group samples by their primary category tag.
cate_dict = get_unitag_instance(sample_ls, 'cate')

# Keep only categories with enough data, ordered by size ascending.
cate_count = {k: len(v) for k, v in cate_dict.items() if len(v) > 2000}
cate_count = sorted(cate_count.items(), key=lambda x: x[1])
# Sample only from the retained categories. Sampling over *all* categories
# (as before) raised ValueError whenever a category held fewer than 2000
# samples; the plotting loop below only uses the retained keys anyway.
cate_dict = {k: random.sample(cate_dict[k], 2000) for k, _ in cate_count}

text_ls = []
cate_names = []
cate_ids = []
print(cate_count)
for ki, (name, _count) in enumerate(cate_count):
    cate_names.append(name)
    # Embed the first turn ("value" of the opening message) of each conversation.
    text_ls += [str(v['conversations'][0]["value"]) for v in cate_dict[name]]
    cate_ids += [ki] * len(cate_dict[name])

# BGE embedding model from a local checkpoint; fp16 for throughput.
model = FlagModel('/share/project/lijijie/tools/transfer_hf/bge-large/bge-large',
                  query_instruction_for_retrieval="Represent this sentence for searching relevant passages:",
                  use_fp16=True)
repre_vectors = model.encode(text_ls)  # each element of text_ls is a string

# Project the embeddings to 2-D and draw the per-category scatter plot.
tsne = TSNE(n_components=2, random_state=42)
data_tsne = tsne.fit_transform(repre_vectors)

distrib_ana_tsne(data_tsne, cate_ids, cate_names=cate_names, out_path='./distrbi_unitag.png')

######################################################################
# NOTE(review): everything below is disabled experimental code, kept as
# bare triple-quoted string literals so it never executes. The three
# sections are: (1) t-SNE over per-category 'content' texts, (2) the same
# plus synthesized cate/tag-combination samples, and (3) a multi-turn
# drift visualization that draws arrows between turn embeddings.
# They reference fields ('content', 'cate_ability') and helpers not used
# by the active script above — confirm against the original data schema
# before re-enabling.
'''
text_ls = [str(i['content']) for v in cate_dict.values() for i in v]
repre_vectors = model.encode( text_ls ) # 每个元素是一个字符串

cate_ids = [i for i, k in enumerate(sorted(cate_dict.keys())) for j in range(len(cate_dict[k]))]
tsne = TSNE(n_components=2, random_state=42)
data_tsne = tsne.fit_transform(repre_vectors)

cate_names = [translated_dict[k] for k in sorted(cate_dict.keys())]
distrib_ana_tsne(data_tsne, cate_ids, cate_names, out_path='./distrbi_unicate.png')


######################################################################

cate_tag_combination = {(i, j):[] for i in cate_dict.keys() for j in cate_dict.keys()}

for sample in sample_ls:
    if tuple(sample['cate_ability']) in cate_tag_combination:
        cate_tag_combination[tuple(sample['cate_ability'])].append(sample)

cate_tag_combination = {k: v for k,v in cate_tag_combination.items() if len(v) > 500}
cate_tag_combination_sample = {k: random.sample(v, 150) for k,v in cate_tag_combination.items()}
# cate_tag_combination_sample = {k:v for k,v in random.sample(cate_tag_combination.items(), 200)}
add_len = sum([len(v) for v in cate_tag_combination_sample.values()])

text_ls_n = text_ls + [str(i['content']) for v in cate_tag_combination_sample.values() for i in v]
repre_vectors = model.encode( text_ls_n ) # 每个元素是一个字符串

cate_ids_n = cate_ids + [max(cate_ids)+1] * add_len

tsne = TSNE(n_components=2, random_state=42)
data_tsne = tsne.fit_transform(repre_vectors)

cate_names_n = cate_names + ['synthesized']
distrib_ana_tsne(data_tsne, cate_ids_n, cate_names_n, out_path='./distrbi_unicate_synthesized.png')

'''
## multiturn ##
# NOTE(review): disabled multi-turn experiment — embeds turns 1/2/3 of
# 10k multi-turn conversations and plots arrows tracing turn-to-turn
# movement in t-SNE space. Not executed (string literal).
'''
from matplotlib.colors import ListedColormap

multiturn_instance = [sample for sample in sample_ls if len(sample['content']) > 2]
multiturn_instance = random.sample(multiturn_instance, 10000)

text_1st = [str(sample['content'][:2]) for sample in multiturn_instance]
text_2nd = [str(sample['content'][2:4]) for sample in multiturn_instance]
text_3rd = [str(sample['content'][4:6]) if len(sample['content'])>=6 else '' for sample in multiturn_instance]
text_ls_n = text_1st + text_2nd + text_3rd
text_ls_n = text_ls + text_ls_n

repre_vectors = model.encode( text_ls_n ) # 每个元素是一个字符串
tsne = TSNE(n_components=2, random_state=42)
data_tsne = tsne.fit_transform(repre_vectors)

cate_ids_n = [max(cate_ids) + 1] * 10000 + [max(cate_ids) + 2] * 10000 + [max(cate_ids) + 3] * 10000
cate_ids_n = cate_ids + cate_ids_n
cate_names_n = cate_names + ['1st', '2nd', '3rd']

distrib_ana_tsne(data_tsne, cate_ids_n, cate_names_n, out_path='./distrbi_multiturn.png', continue_draw=True)

for i in range(len(text_ls), len(text_ls_n)-20000):
     plt.plot((data_tsne[i, 0], data_tsne[i+10000, 0]), (data_tsne[i, 1], data_tsne[i+10000, 1]), linestyle='-', linewidth=0.1, alpha=0.5)
     plt.annotate(
        '', xy=(data_tsne[i+10000, 0], data_tsne[i+10000, 1]), xytext=(data_tsne[i, 0], data_tsne[i, 1]),
        arrowprops=dict(arrowstyle="->", color='blue', lw=0.1, alpha=0.8, mutation_scale=5)
    )

plt.savefig('./distrbi_multiturn.png')

distrib_ana_tsne(data_tsne, cate_ids_n, cate_names_n, out_path='./distrbi_multiturn.png', continue_draw=True)

for i in range(len(text_ls), len(text_ls_n)-20000):
     plt.plot((data_tsne[i+10000, 0], data_tsne[i+20000, 0]), (data_tsne[i+10000, 1], data_tsne[i+20000, 1]), linestyle='-', linewidth=0.1, alpha=0.5)
     plt.annotate(
        '', xy=(data_tsne[i+20000, 0], data_tsne[i+20000, 1]), xytext=(data_tsne[i+10000, 0], data_tsne[i+10000, 1]),
        arrowprops=dict(arrowstyle="->", color='blue', lw=0.1, alpha=0.8, mutation_scale=5)
    )

plt.savefig('./distrbi_multiturn_2_3.png')

# cate_names = ['1st', '2nd', '3rd']
# cmap1 = plt.get_cmap('tab20b')
# cmap2 = plt.get_cmap('tab20c')
# NUM_CATES = len(set(cate_ids))

# new_cmap = ListedColormap(random.sample((cmap1.colors+cmap2.colors), NUM_CATES)[:NUM_CATES])

# plt.figure(figsize=(15,9), dpi=300)  # Make the plot flatter
# plt.subplots_adjust(left=0.1, right=0.7, top=0.9, bottom=0.2)

# scatter = plt.scatter(data_tsne[:, 0], data_tsne[:, 1], c=cate_ids, cmap=new_cmap, marker='o', s=0.1, alpha=0.9)
# cbar = plt.colorbar(scatter)
# cbar.set_ticks([i-float(i)/NUM_CATES for i in range(NUM_CATES)])
# cbar.set_ticklabels(cate_names)
# cbar.set_label('Cluster Label')
# plt.title('DBSCAN Clustering Over t-SNE')
# plt.xlabel('t-SNE Feature 1')
# plt.ylabel('t-SNE Feature 2')
# # plt.savefig('./tsne_llmsys_llmsysseed_1m_152k.png')
# plt.savefig('./distrbi_multiturn.png')

'''
'''
loss match
'''