
# from pymilvus import connections, db
 
# conn = connections.connect(host="36.111.128.85", port=19530)
# # database = db.create_database("sample_db")


# db.using_database("sample_db")
# db.list_database()




# from pymilvus import CollectionSchema, FieldSchema, DataType

# from pymilvus import Collection, utility, connections, db

# import numpy as np
 
# conn = connections.connect(host="36.111.128.85", port=19530)
# print(db.list_database())
# db.using_database("sample_db")
 
# m_id = FieldSchema(name="m_id", dtype=DataType.INT64, is_primary=True,)
# embeding = FieldSchema(name="embeding", dtype=DataType.FLOAT_VECTOR, dim=768,)
# count = FieldSchema(name="count", dtype=DataType.INT64,)
# desc = FieldSchema(name="desc", dtype=DataType.VARCHAR, max_length=256,)
# schema = CollectionSchema(
#   fields=[m_id, embeding, desc, count],
#   description="Test embeding search",
#   enable_dynamic_field=True
# )
 
# collection_name = "word_vector"
# collection = Collection(name=collection_name, schema=schema, using='default', shards_num=2)

# index_params = {
#   "metric_type": "IP",
#   "index_type": "IVF_FLAT",
#   "params": {"nlist": 1024}
# }
 
# collection = Collection("word_vector")
# collection.create_index(
#   field_name="embeding",
#   index_params=index_params
# )
 
# utility.index_building_progress("word_vector")

# coll_name = 'word_vector'
# mids, embedings, counts, descs = [], [], [], []
# data_num = 100
# for idx in range(0, data_num):
#     mids.append(idx)
#     embedings.append(np.random.normal(0, 0.1, 768).tolist())
#     descs.append(f'random num {idx}')
#     counts.append(idx)
 
# collection = Collection(coll_name)
# mr = collection.insert([mids, embedings, descs, counts])
# print(mr)



# from pymilvus import connections, Collection

# # 连接到 Milvus 服务
# connections.connect("default", host="localhost", port="19530")

# # 加载集合到内存中
# collection_name = "CUPL_test"
# collection = Collection(name=collection_name)
# collection.load()


# search_vector = [1.1599862575531006, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]

# search_vector = [0,0,0,0.8467705249786377,0,0.8742109537124634,0.65] + [0.0]*121
# print(search_vector)
# # 定义搜索参数，这里使用 L2 距离和 IVF_FLAT 索引
# search_params = {"metric_type": "L2", "params": {"nprobe": 10}}

# # 执行搜索操作
# results = collection.search(
#     data=[search_vector],
#     anns_field="embedding",  # 指定用于搜索的字段
#     param=search_params,
#     limit=3,  # 返回最接近的3个结果
#     expr=None,  # 如果有表达式过滤条件可以在这里指定
#     output_fields=['ITEM_ID']
# )

# print("Search results:", results)
# print("Search results[0] are the top-3 most similar vectors to the query vector")
# # 打印搜索结果
# for result in results[0]:
#     print(result)




# results = collection.search(
#     data=[np.random.normal(0, 0.1, 768).tolist()],
#     anns_field="embeding",
#     param=search_params,
#     limit=16,
#     expr=None,
#     # output_fields=['m_id', 'embeding', 'desc', 'count'],
#     output_fields=['m_id', 'desc', 'count'],
#     consistency_level="Strong"
# )

  # 假设你想查询的ITEM_ID是'item123'
# item_id_to_search = 1731

# # 构建查询表达式，这里使用等于操作符
# expr = f'ITEM_ID == {item_id_to_search}'

# # 执行查询
# results = collection.query(expr=expr)
# print("Query results:", results)
# # 输出查询结果
# for record in results:
#     print(record)



import os

import pandas as pd
import pymysql

# Database connection parameters.
# NOTE(review): credentials were hard-coded in source. They can now be
# overridden via environment variables (the originals remain as defaults,
# so behavior is unchanged when the variables are unset), but secrets
# should ideally not live in the repository at all.
conn = pymysql.connect(
        host=os.environ.get('DB_HOST', '10.240.0.8'),
        user=os.environ.get('DB_USER', 'lsp_rec_book'),
        passwd=os.environ.get('DB_PASSWORD', 'WaFQ_CRqZekqzxSKgr3l'),
        port=int(os.environ.get('DB_PORT', '38141')),
        db=os.environ.get('DB_NAME', 'lsp_rec_book_sql'),  # target database
        charset='utf8'
    )

# Sample data: each tag maps to a pipe-separated list of record ids.
data = {
    'tag': ['历史', '心理', '玄幻'],
    'idList': ['12|456|78', '6856|3456|87', '564|6|99|7878']
}
df = pd.DataFrame(data)
print(df)



# Rebuild the demo table from scratch, then bulk-load the (tag, idList)
# rows held in `df`.
with conn.cursor() as cur:
    # Drop any stale copy first so the schema below always wins.
    cur.execute("DROP TABLE IF EXISTS test_tag2recordId")
    cur.execute("""
      CREATE TABLE test_tag2recordId (
          tag VARCHAR(255),
          idList TEXT
      )
  """)
    print("Table created")
    conn.commit()

    # One executemany call inserts every row; name=None yields plain tuples.
    insert_query = "INSERT INTO test_tag2recordId (tag, idList) VALUES (%s, %s)"
    rows = list(df.itertuples(index=False, name=None))
    cur.executemany(insert_query, rows)
    print("Data inserted")
    conn.commit()

# Look up the id list stored for a single tag.
with conn.cursor() as cursor:
    # Use a parameterized query rather than inlining the value into the
    # SQL text; pymysql quotes/escapes the %s placeholder safely.
    cursor.execute(
        "SELECT idList FROM test_tag2recordId WHERE tag = %s",
        ('玄幻',)
    )

    # fetchall() returns a sequence of rows; each row is a 1-tuple (idList,).
    results = cursor.fetchall()
    if results:
        print(results[0][0])
    else:
        # Guard against IndexError when the tag has no rows.
        print("no matching rows")

# All queries are done — release the connection (it was never closed before).
conn.close()

# try:
#     # 如果表存在，先删除
#     df.to_sql('test_tag2recordId', con=conn, if_exists='replace', index=False)
# except pymysql.MySQLError as e:
#     print(f"数据库操作出错: {e}")
# finally:
#     conn.close()