# import py2neo
# from py2neo import Graph,Node,Relationship,NodeMatcher,RelationshipMatcher
# import csv

# 删除之前所有的数据   match (n) detach delete n
# graph = Graph('http://192.168.86.128:7474', user='neo4j', password='12345678', name="neo4j")#填入自己当初安装neo4j设定的账号密码
# with open('C:/Users/yinwe/Desktop/hlm.csv','r') as f:#填入数据表存放路径
#     reader=csv.reader(f)
#     for item in reader:
#         s_node=Node("Person",name=item[0])
#         e_node=Node("Person",name=item[1])
#         r=Relationship(s_node,item[2],e_node)
#         graph.merge(s_node,"Person","name")
#         graph.merge(e_node,"Person","name")
#         graph.merge(r,"Label","name")
# graph.delete_all()

# matcher = NodeMatcher(graph)    # 创建节点匹配器
 
# result = matcher.match("资产").where('_.name="XXX刑事判决书"').first() # 符合条件的是否存在
# print('节点是否存在：')
# print(result)
# if result == None:
#     node = Node('判决文书', name = 'XXX刑事判决书')
#     graph.create(node)
# else:
#     node = matcher.match('判决文书').where(name = 'XXX刑事判决书').first()  # 评估匹配并返回匹配的第一个节点
#     print('node')
#     print(node)

# 判断节点是否存在
# def nodeExist(graph, src_ip, src_port):
#     matcher = NodeMatcher(graph)
#     # m = matcher.match(label, ip == ip & port == port).first()
#     result = matcher.match("源主机").where('_.src_ip="'+src_ip+'" AND'+' _.src_port="'+src_port+'"').first()
#     if result is None:
#         return False
#     else:
#         print(result)
#         return True

# print(nodeExist(graph, '121.199.71.198', '25173'))
# result = matcher.match("源主机").where('_.src_ip="121.199.71.198" AND _.src_port="25173"').first()
# print(type(result))
# result['num'] += 1
# graph.push(result)

# node1 = Node('Src Host', '121.199.71.198')
# node2 = Node('Dst Host', '39.105.244.217')

# relation_matcher = RelationshipMatcher(graph)
# r = relation_matcher.match({node1, node2}, r_type='Connect').where('_.dst_port="'+'39.105.244.217'+'"').first()
# r['num']=2
# graph.push(r)


# import pandas as pd

# 假设你已经读取了日志数据并存储在一个DataFrame中，假设该DataFrame为logs
# logs = pd.read_csv('logs.csv')

# 为了方便说明，我们使用一个简化的示例DataFrame
# logs = pd.DataFrame({
#     'src_ip': ['192.168.1.1', '192.168.1.2', '192.168.1.1', '192.168.1.3', '192.168.1.1'],
#     'dst_ip': ['10.0.0.1', '10.0.0.2', '10.0.0.1', '10.0.0.3', '10.0.0.1'],
#     'dst_port':['10001', '10002', '10001', '10003', '10001']
# })

# # 统计某一src_ip到某一dst_ip次数
# result = logs.groupby(['src_ip', 'dst_ip', 'dst_port']).size().reset_index(name='count')

# # 输出结果
# print(type(result))
# print(result)


# import pandas as pd

# # 读取CSV文件
# log_data = pd.read_csv('data.csv')

# # 统计连接次数
# connection_counts = log_data.groupby(['src_ip', 'dst_ip', 'dst_port']).size().reset_index(name='count')

# # 合并连接次数列到原始DataFrame
# log_data = log_data.merge(connection_counts, on=['src_ip', 'dst_ip', 'dst_port'])

# # 将结果保存到CSV文件
# log_data.to_csv('kmeans2.csv', encoding='utf-8', index=False)

import time 

print(type(time.time()))