import torch
from transformers import BertTokenizer, BertModel
from scipy.spatial.distance import cosine

# Load the pretrained Chinese BERT checkpoint and its matching tokenizer.
# NOTE(review): from_pretrained downloads the weights on first run — requires network access.
model_name = 'bert-base-chinese'
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertModel.from_pretrained(model_name)

# Compute a fixed-size BERT vector for a piece of text.
def get_bert_embedding(text):
    """Return the [CLS] embedding of ``text`` as a 1-D torch tensor.

    Encodes via the tokenizer's standard call path, which adds the
    [CLS]/[SEP] special tokens, builds the attention mask, and truncates
    inputs to BERT's 512-token limit. (The previous manual
    tokenize/convert_tokens_to_ids version performed no truncation, so
    any text longer than 512 tokens crashed the model forward pass, and
    it passed no attention mask.)

    Parameters
    ----------
    text : str
        Raw input text (Chinese, for ``bert-base-chinese``).

    Returns
    -------
    torch.Tensor
        The hidden state of the [CLS] token, shape ``(hidden_size,)``.
    """
    # tokenizer(...) adds [CLS]/[SEP] and the attention mask itself;
    # truncation guards against inputs beyond the 512-position limit.
    encoded = tokenizer(
        text,
        return_tensors='pt',
        truncation=True,
        max_length=512,
    )
    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**encoded)
        hidden_states = outputs.last_hidden_state
    # Position 0 is [CLS]; use it as the sentence-level representation.
    return hidden_states[0][0]

# Text-to-text similarity via BERT [CLS] embeddings.
def calculate_similarity(text1, text2):
    """Return the cosine similarity between the BERT embeddings of two texts."""
    emb_a, emb_b = (get_bert_embedding(t) for t in (text1, text2))
    # scipy's cosine() is a *distance*; its complement is the similarity.
    return 1 - cosine(emb_a, emb_b)

import os
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import csv
import glob
import re
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

# Load the table-structure CSV (GB2312-encoded) and replace missing cells with 0.
file_path = 'C:\\Users\\86130\\Downloads\\BIRD_train\\表结构.csv'
df = pd.read_csv(file_path, encoding='GB2312')
df = df.fillna(0)

# Partition the rows by the '数据库表名' (database table name) column.
grouped = df.groupby('数据库表名')

# One entry per table: its field names ('字段名称') and their
# description texts ('text'), kept in parallel row order.
result_dict = {}
for name, rows in grouped:
    result_dict[name] = {
        '字段名称': rows['字段名称'].tolist(),
        'text': rows['text'].tolist(),
    }

# Average a Jaccard-style score over every unordered pair of tables.
# Two fields "match" when their names are equal, or (when both have a
# non-zero description) when their BERT description similarity > 0.8.
simility = 0
table_name = list(result_dict.keys())
values = list(result_dict.values())
for i in range(len(table_name)):
    for j in range(i + 1, len(table_name)):
        first, second = values[i], values[j]
        # Iterate over the shorter field list against the longer one.
        if len(first['字段名称']) <= len(second['字段名称']):
            short_fields, long_fields = first['字段名称'], second['字段名称']
            short_texts, long_texts = first['text'], second['text']
        else:
            short_fields, long_fields = second['字段名称'], first['字段名称']
            short_texts, long_texts = second['text'], first['text']
        intersection = 0
        for m, field in enumerate(short_fields):
            for n, other in enumerate(long_fields):
                if short_texts[m] != 0 and long_texts[n] != 0:
                    # Both descriptions present: exact name match or
                    # semantic match via BERT counts.
                    if field == other or calculate_similarity(short_texts[m], long_texts[n]) > 0.8:
                        intersection += 1
                        break
                else:
                    # Missing description (fillna'd to 0): names only.
                    if field == other:
                        intersection += 1
                        break
        # Jaccard: |A ∩ B| / |A ∪ B|, accumulated across pairs.
        simility += intersection / (len(short_fields) + len(long_fields) - intersection)
        print(simility)
# Normalize by the number of unordered pairs, C(n, 2).
CDB2 = len(table_name) * (len(table_name) - 1) / 2
simility = simility / CDB2
print(simility)