# import os
# import shutil
# import time
# # 设置源文件夹和目标文件夹的路径
# source_folder = 'LSP_book_rec/Dataset/SourceUniversity'
# target_folder = 'LSP_book_rec/Dataset/TestUniversity'

# # 确保目标文件夹存在
# if not os.path.exists(target_folder):
#     print("?????????")
#     time.sleep(20)
# # 遍历源文件夹中的所有文件
# for filename in os.listdir(source_folder):
#     # 构造完整的文件路径
#     source_file_path = os.path.join(source_folder, filename)
#     target_file_path = os.path.join(target_folder, filename)

#     # 检查是否为文件
#     if os.path.isfile(source_file_path):
#         # 读取文件的前2000行
#         with open(source_file_path, 'r', encoding='utf-8') as file:
#             lines = []
#             for i in range(20000):
#                 line = file.readline()
#                 if not line:
#                     break
#                 lines.append(line)

#         # 将读取的行写入到新文件中
#         with open(target_file_path, 'w', encoding='utf-8') as new_file:
#             new_file.writelines(lines)

# print("文件复制完成。")


# import csv

# # 列名称
# column_names = ['tag_name', 'tag_describe']

# # CSV文件的路径
# csv_file_path = 'zz_file.csv'

# # 打开文件并创建CSV writer对象
# with open(csv_file_path, mode='w', newline='', encoding='utf-8') as file:
#     writer = csv.writer(file)
    
#     # 写入列名称
#     writer.writerow(column_names)

# print(f"列名称已写入文件：{csv_file_path}")



# password = b'dds5ADaF6064$^45,fds'


# import requests
# import json
# import hashlib
# import time

# group_code = '200572.0'
# mapping_path = 'cupledu'

# aeskey = 'dds5ADaF6064$^45,fds'

# # 计算签名
# def generate_sign(group_code, mapping_path, aeskey, date):
#     sign_str = f'{mapping_path}{group_code}{aeskey}{date}'
#     return hashlib.md5(sign_str.encode('utf-8')).hexdigest().upper()

# headers = {
#     'groupcode': group_code,
#     'mappingpath': mapping_path,
#     'sign': generate_sign(group_code, mapping_path, aeskey, time.strftime('%Y-%m-%d'))
# }

# params = {
#     'itemId': -1,  # 初始值，表示获取第一条数据
#     'rows': 1000   # 一次请求获取的数据量
# }

# # 发送请求的函数
# def send_request(uri, params, headers):
#     response = requests.post(uri, json=params, headers=headers)
#     if response.status_code == 200:
#         return response.json()  # 返回解析后的JSON数据
#     else:
#         return None

# api_uri = 'http://118.186.60.40:1015/'
# result = send_request(api_uri, params, headers)

# print(json.dumps(result, indent=2, ensure_ascii=False))

# # 访问获取副本信息的API接口
# # api_uri = 'http://118.186.60.40/dataopen/sync/syncPhysicalItem'
# # api_uri = 'http:/dataopen/sync/syncPhysicalItem'

# # api_uri = 'http://118.186.60.40:1015/dataopen/sync/syncPhysicalItem'


# import json
# import hashlib
# import time
# import requests

# group_code = '200572'
# mapping_path = 'cupledu'
# aeskey = 'dds5ADaF6O64$^45,fds'

# # 计算签名
# def generate_sign(group_code, mapping_path, aeskey, date):
#     sign_str = f'{mapping_path}{group_code}{aeskey}{date}'
#     return hashlib.md5(sign_str.encode('utf-8')).hexdigest().upper()

# headers = {
#     'Group-Code': group_code,
#     'Mapping-Path': mapping_path,
#     'Data-Signature': generate_sign(group_code, mapping_path, aeskey, time.strftime('%Y-%m-%d'))
# }
# # print(time.strftime('%Y-%m-%d'))
# # print(generate_sign(group_code, mapping_path, aeskey, time.strftime('%Y-%m-%d')))
# params = {
#     'itemId': -1,  # 初始值，表示获取第一条数据
#     'rows': 1000   # 一次请求获取的数据量
# }

# # 发送请求的函数
# def send_request(uri, params, headers):
#     response = requests.post(uri, json=params, headers=headers)
#     if response.status_code == 200:
#         return response.json()  # 返回解析后的JSON数据
#     else:
#         return None

# api_uri = 'http://118.186.60.40:1015/dataopen/sync/syncPhysicalItem'
# result = send_request(api_uri, params, headers)

# print(json.dumps(result, indent=2, ensure_ascii=False))

# 访问获取副本信息的API接口
# api_uri = 'http://118.186.60.40/dataopen/sync/syncPhysicalItem'
# api_uri = 'http:/dataopen/sync/syncPhysicalItem'

# api_uri = 'http://118.186.60.40:1015/dataopen/sync/syncPhysicalItem'


# import pandas as pd

# # 假设df1和df2是你的两个DataFrame
# df1 = pd.DataFrame({
#     'recordid': [1, 2,2,2, 3, 4,4,4],

# })

# df2 = pd.DataFrame({
#     'recordid': [1, 2, 3, 4],
#     'tag': ['X', 'Y', 'Z', 'W']
# })

# # 使用merge函数通过'recordid'合并两个DataFrame
# merged_df = pd.merge(df1, df2, on='recordid')

# # 打印合并后的DataFrame
# print(merged_df)

# import pandas as pd

# # 创建一个示例 DataFrame
# data = {
#     'ITEM_ID': [101, 102, 101, 103, 102, 104],
#     'VALUE': ['A', 'B', 'C', 'D', 'E', 'F']
# }
# item_df = pd.DataFrame(data)
# print(item_df)
# # 删除重复项，只保留每个ITEM_ID最后一次出现的行
# item_df_unique = item_df.drop_duplicates(subset='ITEM_ID', keep='last')
# print(item_df_unique)


# if __name__ == __main__:

#     user_tower_data_input = {name : user_df[name] for name in user_sparse_features + user_dense_features}
#     user_tower_data_input["user_hist"] = new_user_hist
    
#     print("加载模型......")
#     model_save_path = data_config['model_dir']
#     model = load_model(model_save_path) 
#     print("加载完毕......")
#     user_model_input = []
#     for input_tensor in model.input:
#         if input_tensor.name in user_sparse_features + user_dense_features + ["user_hist"]:  
#             user_model_input.append(input_tensor)
#     print(user_model_input)



# import numpy as np
# import pandas as pd
# import json  
# from sklearn.metrics import log_loss, roc_auc_score
# from sklearn.model_selection import train_test_split
# from sklearn.preprocessing import LabelEncoder, MinMaxScaler
# from tensorflow.keras.preprocessing.sequence import pad_sequences
# from tensorflow.python.keras.metrics import AUC 
# from models.dssm import DSSM
# from processing.feature_column import SparseFeat, get_feature_names, VarLenSparseFeat, DenseFeat
# from tensorflow.keras.models import Model, load_model
# from tensorflow.keras.optimizers import Adam
# from utils import Negative_Sample
# import os
# import pickle
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'  
    

# # 筛选出具有多个唯一值的用户特征列,保存处理后的用户数据到CSV文件。
# def get_user_df(data_config, df):
#     #读取用户表
#     print("正在读取用户表。。。")
#     # user_df = pd.read_excel(data_config['user_path'], engine='openpyxl')
#     user_df = pd.read_csv(data_config['user_path'])
#     # user_df = pd.read_csv(data_config['saved_processed_data_path'] + '/all_user_data.csv')
#     print("读取用户表完毕")
#     user_columns = []

#     for feature_dict in data_config['data_config']['user_data']:
#         if user_df[feature_dict['input_name']].nunique() != 1:
#             user_columns.append(feature_dict['input_name'])

#     user_df = user_df[user_columns]

#     if not os.path.exists(data_config['saved_processed_data_path']):   
#         os.makedirs(data_config['saved_processed_data_path']) 

#     unique_ids = df[~df[user_id].isin(user_df[user_id])][user_id] 
#     other_user_id = pd.DataFrame({user_id: unique_ids})
#     user_df = pd.concat([user_df, other_user_id], ignore_index=True) 
#     user_df.to_csv(data_config['saved_processed_data_path'] + '/all_user_data.csv', index=False, sep=',', mode='w', header=True)

#     user_sparse_features = [] 
#     user_dense_features = [] 
    
#     with open(data_config['saved_processed_data_path'] + '/all_feats_name.json', 'r') as f:  
#         feats_name_lt = json.load(f) 
#     feats_name_lt = feats_name_lt['feats_name']

#     for user_fea in data_config['data_config']['user_data']:
#         if user_fea["input_name"] not in feats_name_lt:
#             continue
#         if user_fea["feature_type"] == "IdFeature":
#             user_sparse_features.append(user_fea["input_name"])
#         elif user_fea["feature_type"] == "DenseFeature":
#             user_dense_features.append(user_fea["input_name"])

#     user_df[user_sparse_features] = user_df[user_sparse_features].fillna(-2)
#     user_df[user_dense_features] = user_df[user_dense_features].fillna(0)
#     user_df[user_dense_features] = user_df[user_dense_features].astype(float) 

#     return user_df, user_sparse_features, user_dense_features


# def get_var_feature(data, col):
#     key2index = {}
#     def split(x):
#         key_ans = x.split('|')
#         for key in key_ans:
#             if key not in key2index:
#                 # Notice : input value 0 is a special "padding",so we do not use 0 to encode valid feature for sequence input
#                 key2index[key] = len(key2index) + 1
#         return list(map(lambda x: key2index[x], key_ans))
#     var_feature = list(map(split, data[col].values))
#     var_feature_length = np.array(list(map(len, var_feature)))
#     max_len = max(var_feature_length)
#     var_feature = pad_sequences(var_feature, maxlen=max_len, padding='post', )
#     return key2index, var_feature, max_len


# def get_test_var_feature(data, col, key2index, max_len):
#     def split(x):
#         key_ans = x.split('|')
#         for key in key_ans:
#             if key not in key2index:
#                 # Notice : input value 0 is a special "padding",so we do not use 0 to encode valid feature for sequence input
#                 key2index[key] = len(key2index) + 1
#         return list(map(lambda x: key2index[x], key_ans))
#     test_hist = list(map(split, data[col].values))
#     test_hist = pad_sequences(test_hist, maxlen=max_len, padding='post', )
#     return test_hist

# pd.set_option('display.max_colwidth', None)  # None 表示不限制列宽
# pd.set_option('display.max_columns', None)
# if __name__ == "__main__":

#     # train, val, test, data = data_process()
#     import sys
#     if len(sys.argv) != 2:
#         print("使用方法: python run_update_user_embedding.py [配置文件路径]")
#         sys.exit(1)
    
#     file_path = sys.argv[1]
  
#     with open(file_path, 'r') as file:  
#         data_config = json.load(file)

#     with open(file_path, 'r') as file:  
#         data_config = json.load(file)

#     #读取借阅历史表
#     print("正在读取借阅历史表。。。")
#     # df = pd.read_excel(data_config['loan_path'], engine='openpyxl')
#     df = pd.read_csv(data_config['loan_path'])

#     print("读取借阅历史表完毕")
#     user_id = data_config['data_config']['loan_data']['user_id']['input_name']
#     item_id = data_config['data_config']['loan_data']['item_id']['input_name']
#     loan_date = data_config['data_config']['loan_data']['loan_date']['input_name']

#     user_df, user_sparse_features, user_dense_features = get_user_df(data_config, df)
#     user_id_df = user_df[user_id]

#     df = df.sort_values(by=loan_date, ascending=True) 

#     data_group = df[[user_id, item_id]].groupby(user_id).agg(list).reset_index()

#     # 对借阅历史数据按时间排序，并按用户ID分组，然后对用户的行为序列进行截断
#     data_group[item_id] = data_group[item_id].apply(lambda x: x[-50:] if len(x) > 50 else x) 

#     data_group['user_hist'] = data_group[item_id].apply(lambda x: '|'.join([str(i) for i in x]))
#     user_df = pd.merge(user_df, data_group.drop(item_id, axis=1), on=user_id, how='left')
#     user_df = user_df.drop_duplicates(subset=[user_id]) 
#     user_df['user_hist'] = user_df['user_hist'].fillna("")

#     #TODO：对于拉取的新用户，需要对应的更新这个key2index.json
#     with open(data_config['user_key2index'] + '/user_key2index.json', 'r') as f:  
#         user_key2index = json.load(f) 
    
#     print("before new user hist: ",user_df)
#     new_user_hist = get_test_var_feature(user_df, 'user_hist', user_key2index, 50)
#     print("new_user_hist: ",new_user_hist)
#     print("开始编码")
#     #特征编码
#     with open(data_config['feature_transformer_dir'] + '/sparse_labelencoder.pkl', 'rb') as f:  
#         sparse_labelencoder = pickle.load(f)
#     with open(data_config['feature_transformer_dir'] + '/dense_minmaxscaler.pkl', 'rb') as f:  
#         dense_minmaxscaler = pickle.load(f)

#     # def set_unknown_to_nan_simplified(val, known_labels):  
#     #     return -2 if val not in known_labels else val 
    
#     import warnings 
#     with warnings.catch_warnings():  
#         warnings.simplefilter("ignore")
#         # 1.Label Encoding for sparse features,and process sequence features
#         for feat in user_sparse_features:
#             lbe = sparse_labelencoder[feat]

#             is_known = user_df[feat].isin(lbe.classes_)  
#             user_df.loc[~is_known, feat] = -2
#             user_df[feat] = lbe.transform(user_df[feat])
                
#         mms = dense_minmaxscaler["user_dense_features"]
#         user_df[user_dense_features] = mms.transform(user_df[user_dense_features])
    
#     user_tower_data_input = {name : user_df[name] for name in user_sparse_features + user_dense_features}
#     user_tower_data_input["user_hist"] = new_user_hist
    
#     print("加载模型......")
#     model_save_path = data_config['model_dir']
#     model = load_model(model_save_path) 
#     print("加载完毕......")



#     user_model_input = []
#     for input_tensor in model.input:
#         if input_tensor.name in user_sparse_features + user_dense_features + ["user_hist"]:  
#             user_model_input.append(input_tensor)

#     user_tower = Model(inputs=user_model_input, outputs=model.get_layer("user_embedding").output)
    
#     print("user_tower_input: ",user_tower_data_input)
#     user_embedding = user_tower.predict(user_tower_data_input)
#     print("user_embedding shape: ", user_embedding.shape)
    
#     np.save(data_config['embedding_saved_dir'] + "/user_embedding.npy", user_embedding)



#     print("*****************************************")
#     import faiss


#     item_index = faiss.read_index(os.path.join(data_config['embedding_saved_dir'], "item_embedding.index"))
#     user_embeddings = np.load(data_config['embedding_saved_dir'] + "/user_embedding.npy")


#     D, I = item_index.search(user_embeddings, 3)  # xq为待检索向量，返回的I为每个待检索query最相似TopK的索引list，D为其对应的距离
#     print(item_index.ntotal)
#     print(type(D))

#     print('nearest vector ids:\n',I[:30],'\n')
#     print('metric(distances/scores) to query:\n',D[-50:],'\n')

# import pymysql

# def search_test(item_ids,groupName,cursor):
#     sql_select = "SELECT TITLE_S FROM book_attr WHERE RECORD_ID IN %s"
#     sql_select = sql_select.replace('book_attr', f'{groupName}_book_attr')
#     cursor.execute(sql_select,(item_ids,))
#     results = cursor.fetchall()
#     record_ids = [record[0] for record in results]
#     return record_ids

# L = [4568,6476,79,348974,164974,64486,46,684]
# conn = pymysql.connect(
#         host='10.240.0.8',
#         user='lsp_rec_book',
#         passwd='WaFQ_CRqZekqzxSKgr3l',
#         port=38141,
#         db='lsp_rec_book_sql',  # 连接到数据库
#         charset='utf8'
#     )
# cursor = conn.cursor()
# print(search_test(L,'test',cursor))

# from utils import get_gpt_ans
# prompt= "are u still here?"
# L = ['民商法理论争议问题 : 无权处分', '兼并与收购', '数学与金融', '企业财务分析', '国家垄断资本主义共性与特点. 上', '耶路撒冷 : 论宗教权利与犹太教', '拯救全球金融', '获得权威 : 上海地下党群众工作的历史经验与启示', '给一个未出生孩子的信', '招标投标法实施条例条文解读与案例分析', '化解产能过剩问题研究', '著作权法体系化研究', '卫生法学通论', '谈美 谈美书简', '丰乳肥臀', '新编经济法教程 : 微课版', 'Food and agricultural code, annotated, of the State of California  :adopted March 15, 1967, with amendments through adjournment of the 1981-1982 Legislature .32051 to 58000 ', '杂技与魔术', 'Antitrust economics on trial  :a dialogue on the new laissez-faire ', '发展型国家及其未来可能性研究']
# prompt = f"根据以下信息，请生成一个吸引人且具有创意的10个字以内的中文'书单名'与50个字以内的'书单简介'。\
#                     - 书单包含以下书籍：{L}\
#                     请确保书单名反映书单的特点和主题。\
#                     "
# print(prompt)
# print(get_gpt_ans(prompt))

# import numpy as np
 
# # 构造数据
# import time
# d = 50                           # dimension
# nb = 100                     # database size
# np.random.seed(1234)             # make reproducible
# xb = np.random.random((nb, d)).astype('float32')
# xb[:, 0] += np.arange(nb) / 1000.
 
# print(xb[:1])
 
# # 写入文件中
# np.savetxt('data.txt', xb)


# import pandas as pd

# # 假设 user.csv 文件位于当前工作目录中
# file_path = "/opt/wyh/LSP_book_rec/Manual_backup/YanBianUniversity/user.csv"

# # 尝试读取CSV文件
# try:
#     user_data = pd.read_csv(file_path)

#     # 查找 USER_ID 为 141192 的行
#     result = user_data[user_data['USER_ID'] == 141192]
# except FileNotFoundError:
#     result = "文件未找到，请确保文件路径正确。"
# except KeyError:
#     result = "CSV文件中不存在USER_ID列，请检查列名是否正确。"

# # 输出结果
# print(result)

# import json
# path = "./Dataset/YanBianUniversity/user_key2index.json"
# with open(path, 'r') as f:
#     user_dict = json.load(f)
# record_id = 140619
# print(user_dict.get(str(record_id)))



# import json

# def find_duplicate_keys(file_path):
#     # 用于存储已经出现过的键
#     seen_keys = set()
#     duplicates = []

#     # 读取并解析JSON文件
#     with open(file_path, 'r') as file:
#         users = json.load(file)
#     # print(users)
#     # 遍历每个用户的信息
#     for user in users:
#         # 遍历用户字典中的键
#         for key in user.keys():
#             # 如果键已存在，则添加到重复列表中
#             if key in seen_keys:
#                 duplicates.append(key)
#             else:
#                 seen_keys.add(key)

#     return duplicates

# # 假设 user.json 文件位于当前目录下
# file_path = "./Dataset/YanBianUniversity/user_key2index.json"
# duplicates = find_duplicate_keys(file_path)

# # 输出重复的键
# print("Duplicate keys found:")
# for dup in duplicates:
#     print(dup)


# import json
# import hashlib
# import time
# import requests
# import pymysql
# import os
# import sqlite3
# import pandas as pd
# import time

# from fetch_utils import generate_sign,fetch_loan_history,fetch_user,fetch_item,fetch_book_attr,fetch_metadata,create_sql_database,initial_load
# # from TagClass import Tag

# groupName = 'YBU'
# json_file = f'../configs/{groupName}.json'
# base_url,headers,conn = initial_load(json_file)

# result = fetch_user(base_url,headers,10981,3)
# print(result)



# 
#import time
#from utils import get_gpt_ans
#
#book_infos = ['经济犯罪立案·定罪·量刑标准. 2005年最新版', '法律服务  会计审计', '非理性及其价值研究', '边读边悟《易经》', '花痴日记', '法律人，你为什么不争气？ : 法律伦理与理想的重建', '商务谈判. 第2版', '徐向前回忆录', '榆下杂说', '炼狱圣徒 : 陀思妥耶夫斯基传', '医药购销领域商业贿赂罪适用解析', '开放', '出土文献与法律史研究', '行政许可 : 法和经济学 : a law & economics approach', '《药品经营质量管理规范》(2012年修订)实施精讲', '中外歷史年表 : 校订本 : 公元前4500年-公元1918年', '理性与责任 : 实践理性的两个基本概念 : Ein philosophischer Essay uber praktische Vernunft', '数字经济引领高质量发展', '黑龙江经济发展报告. 2020', 'ラテンアメリカ諸囯の経済関係法']
#prompt = f"根据以下信息，请生成一个吸引人且具有创意的10个字以内的中文'书单名'与50个字以内的'书单简介'。\
#                    - 书单包含以下书籍：{book_infos}\
#                    请确保书单名反映书单的特点和主题。\
#                    "
#start_time = time.time()
#for i in range(100):
#    s1 = time.time()
#    print(get_gpt_ans(prompt))
#    print("total time",time.time()-s1)
#print("total time",time.time()-start_time)


from pymilvus import connections, Collection

# Connect to the Milvus service (local standalone instance).
connections.connect("default", host="localhost", port="19530")

# Load the collection into memory so it becomes searchable.
collection_name = "CUPL"
collection = Collection(name=collection_name)
collection.load()

# Query vector: 128 dimensions — a few hand-picked non-zero components,
# zero-padded to full length.
# NOTE: a previous hard-coded 128-float literal assigned here was dead code
# (it was unconditionally overwritten before any use) and has been removed.
search_vector = [0, 0, 0, 0.8467705249786377, 0, 0.8742109537124634, 0.65] + [0.0] * 121
print(search_vector)

# Search parameters: L2 (Euclidean) distance; nprobe=10 controls how many
# index clusters are probed (IVF-style index assumed — confirm index type).
search_params = {"metric_type": "L2", "params": {"nprobe": 10}}

# Run the similarity search for the 3 nearest neighbours.
results = collection.search(
    data=[search_vector],
    anns_field="embedding",   # vector field of the collection to search
    param=search_params,
    limit=3,                  # return the top-3 closest results
    expr=None,                # no scalar filter expression
    output_fields=['ITEM_ID']
)

print("Search results:", results)
print("Search results[0] are the top-3 most similar vectors to the query vector")
# Print each individual hit (id, distance, and requested output fields).
for result in results[0]:
    print(result)




