import os
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import csv
import glob
from sklearn.metrics.pairwise import cosine_similarity
# Accumulates one average-similarity score per database, across both splits.
sim_list = []
directory_path = r'C:\Users\86130\Downloads\BIRD_dev\dev\dev_databases\dev_databases'
#directory_path = r'C:\Users\86130\Downloads\BIRD_train\train\train_databases\train_databases'
# List everything under the root directory, then keep only sub-directories
# (each sub-directory is one database).
contents = os.listdir(directory_path)
folder_names = [name for name in contents
                if os.path.isdir(os.path.join(directory_path, name))]


def create_description(path):
    """Read a table-description CSV and return its 'column_description' column.

    Parameters
    ----------
    path : str
        Path to a description CSV file (latin-1 encoded in the BIRD dataset).

    Returns
    -------
    list[str]
        One entry per data row, in file order; empty when the file has no
        rows or lacks a 'column_description' column.
    """
    column_description_list = []
    with open(path, 'r', newline='', encoding='latin-1') as file:
        csv_reader = csv.DictReader(file)
        fieldnames = csv_reader.fieldnames
        # Guard: fieldnames is None for a completely empty file, and the
        # previous `'column_description' in fieldnames` check then raised
        # TypeError. Only read rows when the column actually exists.
        if fieldnames and 'column_description' in fieldnames:
            for row in csv_reader:
                column_description_list.append(row['column_description'])
    return column_description_list

def all_empty(my_list):
    """Return True iff every element of *my_list* equals the empty string."""
    for element in my_list:
        if element != "":
            return False
    return True

# 连接路径并打印
# For every database folder in the dev split, compute the average pairwise
# similarity between its table-description CSV files.
for folder_name in folder_names:
    folder_path = os.path.join(directory_path, folder_name)
    description_path = os.path.join(folder_path, 'database_description')
    # All description CSVs for this database.  os.path.basename is
    # separator-agnostic; the previous file.split('/')[-1] only worked by
    # accident with Windows backslash paths (os.path.join discards its
    # first argument when the second is already absolute).
    csv_files = glob.glob(f'{description_path}/*.csv')
    DB = [os.path.join(description_path, os.path.basename(f)) for f in csv_files]
    sim = 0

    # Compare every unordered pair of description files.
    for i in range(len(DB)):
        for j in range(i + 1, len(DB)):
            intersection = 0
            des1 = create_description(DB[i])
            des2 = create_description(DB[j])
            len1 = len(des1)
            len2 = len(des2)
            # Put the shorter description list first; the cosine-matrix
            # column offsets below depend on this layout.
            if len1 <= len2:
                documents = des1 + des2
            else:
                documents = des2 + des1

            # Skip pairs where either file contains only blank descriptions;
            # CountVectorizer would raise on an empty vocabulary.
            if not all_empty(des1) and not all_empty(des2):
                # Bag-of-words vectorization of all descriptions at once.
                count_vectorizer = CountVectorizer(stop_words='english')
                sparse_matrix = count_vectorizer.fit_transform(documents)
                doc_term_matrix = sparse_matrix.todense()
                df = pd.DataFrame(doc_term_matrix,
                                  columns=count_vectorizer.get_feature_names_out())

                cos = cosine_similarity(df)

                # Count descriptions in the shorter list that have a match
                # (cosine > 0.8 or exact string equality) in the longer
                # list; each source row contributes at most one match.
                if len1 <= len2:
                    for m in range(len1):
                        for n in range(len2):
                            if cos[m][n + len1] > 0.8 or des1[m] == des2[n]:
                                intersection = intersection + 1
                                break
                else:
                    for m in range(len2):
                        for n in range(len1):
                            if cos[m][n + len2] > 0.8 or des1[n] == des2[m]:
                                intersection = intersection + 1
                                break
                # Jaccard-style similarity for this file pair.
                similarity = float(intersection / (len1 + len2 - intersection))
                sim = sim + similarity

    # C(len(DB), 2): number of unordered file pairs.  Guard against
    # databases with fewer than two description files, which previously
    # raised ZeroDivisionError.
    CDB2 = len(DB) * (len(DB) - 1) / 2
    sim = sim / CDB2 if CDB2 else 0.0
    sim_list.append(sim)


# Second pass: point at the train split and rebuild the database list.
directory_path = r'C:\Users\86130\Downloads\BIRD_train\train\train_databases\train_databases'
# List everything under the root directory, then keep only sub-directories
# (each sub-directory is one database).
contents = os.listdir(directory_path)
folder_names = [name for name in contents
                if os.path.isdir(os.path.join(directory_path, name))]


# NOTE(review): this re-definition is a copy-paste duplicate of the
# earlier create_description; consider keeping only one.
def create_description(path):
    """Read a table-description CSV and return its 'column_description' column.

    Parameters
    ----------
    path : str
        Path to a description CSV file (latin-1 encoded in the BIRD dataset).

    Returns
    -------
    list[str]
        One entry per data row, in file order; empty when the file has no
        rows or lacks a 'column_description' column.
    """
    column_description_list = []
    with open(path, 'r', newline='', encoding='latin-1') as file:
        csv_reader = csv.DictReader(file)
        fieldnames = csv_reader.fieldnames
        # Guard: fieldnames is None for a completely empty file, and the
        # previous `'column_description' in fieldnames` check then raised
        # TypeError. Only read rows when the column actually exists.
        if fieldnames and 'column_description' in fieldnames:
            for row in csv_reader:
                column_description_list.append(row['column_description'])
    return column_description_list

def all_empty(my_list):
    """Return True iff every element of *my_list* equals the empty string."""
    for element in my_list:
        if element != "":
            return False
    return True

# 连接路径并打印
# For every database folder in the train split, compute the average pairwise
# similarity between its table-description CSV files.
for folder_name in folder_names:
    folder_path = os.path.join(directory_path, folder_name)
    description_path = os.path.join(folder_path, 'database_description')
    # All description CSVs for this database.  os.path.basename is
    # separator-agnostic; the previous file.split('/')[-1] only worked by
    # accident with Windows backslash paths (os.path.join discards its
    # first argument when the second is already absolute).
    csv_files = glob.glob(f'{description_path}/*.csv')
    DB = [os.path.join(description_path, os.path.basename(f)) for f in csv_files]
    sim = 0

    # Compare every unordered pair of description files.
    for i in range(len(DB)):
        for j in range(i + 1, len(DB)):
            intersection = 0
            des1 = create_description(DB[i])
            des2 = create_description(DB[j])
            len1 = len(des1)
            len2 = len(des2)
            # Put the shorter description list first; the cosine-matrix
            # column offsets below depend on this layout.
            if len1 <= len2:
                documents = des1 + des2
            else:
                documents = des2 + des1

            # Skip pairs where either file contains only blank descriptions;
            # CountVectorizer would raise on an empty vocabulary.
            if not all_empty(des1) and not all_empty(des2):
                # Bag-of-words vectorization of all descriptions at once.
                count_vectorizer = CountVectorizer(stop_words='english')
                sparse_matrix = count_vectorizer.fit_transform(documents)
                doc_term_matrix = sparse_matrix.todense()
                df = pd.DataFrame(doc_term_matrix,
                                  columns=count_vectorizer.get_feature_names_out())

                cos = cosine_similarity(df)

                # Count descriptions in the shorter list that have a match
                # (cosine > 0.8 or exact string equality) in the longer
                # list; each source row contributes at most one match.
                if len1 <= len2:
                    for m in range(len1):
                        for n in range(len2):
                            if cos[m][n + len1] > 0.8 or des1[m] == des2[n]:
                                intersection = intersection + 1
                                break
                else:
                    for m in range(len2):
                        for n in range(len1):
                            if cos[m][n + len2] > 0.8 or des1[n] == des2[m]:
                                intersection = intersection + 1
                                break
                # Jaccard-style similarity for this file pair.
                similarity = float(intersection / (len1 + len2 - intersection))
                sim = sim + similarity

    # C(len(DB), 2): number of unordered file pairs.  Guard against
    # databases with fewer than two description files, which previously
    # raised ZeroDivisionError.
    CDB2 = len(DB) * (len(DB) - 1) / 2
    sim = sim / CDB2 if CDB2 else 0.0
    sim_list.append(sim)
# Scale every per-database score to a percentage (round to 2 decimals first,
# then multiply by 100, matching the original ordering of operations).
sim_list = [100 * round(element, 2) for element in sim_list]
print(sim_list)
# Guard: an empty sim_list (no databases found) previously raised
# ZeroDivisionError; report 0.0 instead.
average = sum(sim_list) / len(sim_list) if sim_list else 0.0
print(average)