from FlagEmbedding import FlagModel
# import torch
import pandas as pd
import numpy as np
import os
import time
import threading
import concurrent.futures
import sys, datetime
sys.path.append("./")
from logs.log import setup_custom_logger
# Report the working directory at import time (printed label is Chinese for "current path is:").
current_path = os.getcwd()
print("当前路径是:", current_path)
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Logging system initialization
_ONE_DAY_IN_SECONDS = 60 * 60 * 24  # one day in seconds; unused in this file — presumably kept for a serving loop elsewhere, TODO confirm
# Log directory is a sibling of this file's parent directory: ../Logfiles
LOG_DIR = os.path.join(os.path.dirname(__file__), '..', 'Logfiles')
print("LOG_DIR", LOG_DIR)
# One log file per calendar day, named YYYY-MM-DD.log
file_name = str(datetime.datetime.now().strftime('%Y-%m-%d'))
my_logger = setup_custom_logger(os.path.join(LOG_DIR, '%s.log' % file_name), log_level="INFO")

class Tag:
    """Automatically tag book records by semantic similarity.

    Loads a BGE embedding model plus a CSV of tag names/descriptions,
    pre-computes tag embeddings, and matches book records pulled from the
    database against them (see add_tag).
    """

    def __init__(self, process_tag_path):
        """Load the embedding model and pre-compute tag embeddings.

        Args:
            process_tag_path: Path to a CSV with 'tag_name' and
                'tag_describe' columns.
        """
        # NOTE(review): model path is hard-coded; assumes the BGE checkpoint
        # exists at this location — confirm for other deployments.
        self.model = FlagModel('/opt/zzg-cx/models/models--BAAI--bge-large-zh-v1.5/snapshots/bge_large_v1.5',
                    query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章：",
                    use_fp16=True)

        self.process_tag_path = process_tag_path
        self.tag_embeddings, self.tag_list = self._load_tag_embedding()

    def _load_tag_embedding(self):
        """Read the tag CSV and embed each tag description.

        Returns:
            (embeddings, tag_names): query embeddings of the 'tag_describe'
            column and the parallel list of 'tag_name' values.
        """
        # TODO: persist these embeddings in a vector database instead of
        # recomputing them on every startup.
        df = pd.read_csv(self.process_tag_path)
        tag_names = df['tag_name'].to_list()
        tag_descriptions = df['tag_describe'].to_list()
        embeddings = self.model.encode_queries(tag_descriptions)
        return embeddings, tag_names

    def _select_top_tags(self, scores, score_threshold):
        """Choose tag names for each book from a similarity matrix.

        Args:
            scores: (n_tags, n_books) similarity matrix.
            score_threshold: minimum score for a runner-up tag to be kept.

        Returns:
            One '|'-joined tag-name string per book (per column of scores).
            The best-scoring tag is always kept; the second-best only if it
            exceeds score_threshold.
        """
        tags = []
        for col in range(scores.shape[1]):
            top_two = np.argsort(scores[:, col])[::-1][:2].tolist()
            chosen = [top_two[0]]  # the best match is always assigned
            for idx in top_two[1:]:
                if scores[idx, col] > score_threshold:
                    chosen.append(idx)
            tags.append("|".join(self.tag_list[i] for i in chosen))
        return tags

    def add_tag(self, record_id_list, groupName, cursor, score_threshold=0.35):
        """Fetch book metadata for the given records and assign tags.

        Args:
            record_id_list: record ids to tag (duplicates are de-duplicated).
            groupName: table-name prefix, e.g. 'CUPL' -> 'CUPL_book_attr'.
            cursor: open DB cursor; the caller owns the connection.
            score_threshold: minimum similarity for a second tag
                (default 0.35, the original hard-coded value).

        Returns:
            pandas.DataFrame with columns ['recordId', 'tag'].

        Raises:
            ValueError: if groupName is not a plain identifier.
        """
        # groupName is spliced into table names and cannot be bound as a SQL
        # parameter — reject anything that is not a plain identifier so it
        # cannot be used for SQL injection.
        if not groupName.isidentifier():
            raise ValueError(f"invalid group name: {groupName!r}")

        unique_record_ids = list(set(record_id_list))
        if not unique_record_ids:
            # An empty "IN ()" clause is invalid SQL; short-circuit instead.
            return pd.DataFrame(columns=['recordId', 'tag'])

        query_book_attr = f"""
            SELECT RECORD_ID, TITLE_S, AUTHOR, SERIES
            FROM {groupName}_book_attr
            WHERE RECORD_ID IN %s;
            """
        query_metadata = f"""
            SELECT RECORD_ID, FIELD_DATA
            FROM {groupName}_metadata
            WHERE FIELD = '330' AND RECORD_ID IN %s;
            """
        # The id list itself is bound as a parameter (driver expands the tuple).
        cursor.execute(query_book_attr, (tuple(unique_record_ids),))
        book_attr_df = pd.DataFrame(cursor.fetchall(),
                                    columns=['RECORD_ID', 'TITLE_S', 'AUTHOR', 'SERIES'])

        cursor.execute(query_metadata, (tuple(unique_record_ids),))
        metadata_df = pd.DataFrame(cursor.fetchall(),
                                   columns=['RECORD_ID', 'FIELD_DATA'])

        # Merge attributes with the '330' metadata field; records without
        # metadata keep NaN in FIELD_DATA.
        df = pd.merge(book_attr_df, metadata_df, on='RECORD_ID', how='left')

        # Concatenate all available text fields into one description string
        # per record, skipping missing (NaN) values.
        text_cols = ['TITLE_S', 'AUTHOR', 'SERIES', 'FIELD_DATA']
        df['concatenated'] = df[text_cols].apply(
            lambda row: ''.join(str(v) + ' ' for v in row if pd.notna(v)),
            axis=1)
        book_info_lt = df['concatenated'].tolist()

        # Embed the book descriptions and score them against every tag.
        # Kept as an instance attribute for backward compatibility with any
        # caller that inspects it after the call.
        self.book_info_embeddings = self.model.encode(book_info_lt)
        scores = self.tag_embeddings @ self.book_info_embeddings.T
        df['tag'] = self._select_top_tags(scores, score_threshold)

        df = df[['RECORD_ID', 'tag']].rename(columns={'RECORD_ID': 'recordId'})
        my_logger.info("get tag success!!")
        return df

if __name__ == '__main__':

    # Smoke test: load the service config, open a DB connection, and tag a
    # small batch of records for one group.
    json_file='../configs/test.json'
    # from pull_data_from_sql import main
    from fetch_utils import initial_load
    # initial_load presumably returns (service URL, HTTP headers, DB connection) — confirm against fetch_utils
    chaoxingyun_url,headers,conn = initial_load(json_file)
    test = Tag("/opt/wyh/LSP_book_rec/Label_books_with_categories/processed_data/SourceUniversity/process_tag.csv")
    # Sample ids include duplicates (285 appears three times); add_tag de-duplicates them.
    record_id_list = [12,285,285,867,45,285,786,6,2656,654,13415,4684,84648,1417]
    groupName = 'CUPL'
    with conn.cursor() as cursor:
        test.add_tag(record_id_list,groupName,cursor)
