import json
import torch
from transformers import BertTokenizer, BertModel
from scipy.spatial.distance import cosine

# Load the pretrained Chinese BERT model and its tokenizer.
# NOTE: downloads/caches the weights on first run; `tokenizer` and `model`
# are module-level globals consumed by get_bert_embedding below.
model_name = 'bert-base-chinese'
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertModel.from_pretrained(model_name)

# Compute a fixed-size BERT vector for a piece of text.
def get_bert_embedding(text):
    """Return the BERT [CLS] embedding for *text* as a 1-D torch tensor.

    The final hidden state of the [CLS] token is used as the sentence-level
    representation.
    """
    # Let the tokenizer add the special tokens ([CLS]/[SEP]) and build the
    # input tensors in one call. `truncation=True` is the fix: the original
    # manual tokenize/convert path crashed the model on texts longer than
    # BERT's positional limit (model_max_length, 512 for bert-base-chinese).
    encoded = tokenizer(
        text,
        return_tensors='pt',
        truncation=True,
        max_length=tokenizer.model_max_length,
    )
    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**encoded)
        hidden_states = outputs.last_hidden_state
    # [CLS] is always the first token of the (single) sequence.
    embedding = hidden_states[0][0]
    return embedding

# Compute the semantic similarity between two texts.
def calculate_similarity(text1, text2):
    """Return the cosine similarity of the BERT embeddings of two texts."""
    vec_a = get_bert_embedding(text1)
    vec_b = get_bert_embedding(text2)
    # scipy's `cosine` is a *distance*; similarity = 1 - distance.
    return 1 - cosine(vec_a, vec_b)

# Load the table-structure metadata exported for the BIRD training set.
file_path = r'C:\Users\86130\Downloads\BIRD_train\table_structure.json'
with open(file_path, encoding='utf-8') as fp:
    data = json.load(fp)


DB_similarity = []
for database in range(len(data)):
    table_data = data[database]
    # Each entry of "columns_names_original" is a [table_index, column_name]
    # pair, aligned index-for-index with "column_comment".
    columns_names_original = table_data['columns_names_original']
    column_comment = table_data['column_comment']

    # Group column names and their comments by the table index they belong to.
    classified_lists = {}
    for idx, (number, name) in enumerate(columns_names_original):
        comment_text = column_comment[idx]
        if number in classified_lists:
            classified_lists[number]['names'].append(name)
            classified_lists[number]['comments'].append(comment_text)
        else:
            classified_lists[number] = {'names': [name], 'comments': [comment_text]}

    # Per-table lists of column names and comments (dict preserves
    # insertion order, so `column[k]` and `comment[k]` stay aligned).
    column = [items['names'] for items in classified_lists.values()]
    comment = [items['comments'] for items in classified_lists.values()]
    DB = len(classified_lists)

    # Pre-compute one BERT embedding per column comment, per table.
    embedding_index = [
        [get_bert_embedding(text) for text in comments]
        for comments in comment
    ]

    similarity = 0
    for i in range(DB):
        for j in range(i + 1, DB):
            # Make des1/des3 the smaller table so the outer matching loop
            # runs over the shorter column list.
            if len(column[i]) <= len(column[j]):
                des1, des2 = column[i], column[j]            # column names
                des3, des4 = embedding_index[i], embedding_index[j]  # comment embeddings
            else:
                des1, des2 = column[j], column[i]
                des3, des4 = embedding_index[j], embedding_index[i]
            intersection = 0
            for m in range(len(des1)):
                for n in range(len(des2)):
                    # Two columns "match" when their names are identical or
                    # their comment embeddings are close (cosine distance < 0.2).
                    if des1[m] == des2[n] or cosine(des3[m], des4[n]) < 0.2:
                        intersection += 1
                        break
            # Jaccard-style score for this pair of tables.
            similarity += intersection / (len(des1) + len(des2) - intersection)

    # Average over all C(DB, 2) table pairs. Guard: a database with fewer
    # than two tables has no pairs, which previously raised ZeroDivisionError.
    pair_count = DB * (DB - 1) / 2
    similarity = similarity / pair_count if pair_count else 0.0
    DB_similarity.append(similarity)
    print(similarity)

# Guard against an empty input file before averaging (previously a
# ZeroDivisionError when `data` was empty).
average = sum(DB_similarity) / len(DB_similarity) if DB_similarity else 0.0
print(DB_similarity)
print(average)