from urllib.parse import quote_plus
from pymongo import MongoClient
from pymongo.errors import OperationFailure
from bson.son import SON
import pandas as pd
def get_database_info(database_name, collection_name):
    """Aggregate per-IP event counts from a MongoDB collection, per actor.

    Reads actor ids (one per line) from ``activity_ids.txt``, and for each
    actor runs an aggregation over *collection_name* counting documents per
    ``ip`` within a fixed ``add_time`` window, then writes the result to
    ``<actorId>.csv`` with columns ``ip`` and ``count``.

    Args:
        database_name: Name of the MongoDB database to query.
        collection_name: Name of the collection holding the events.

    Side effects:
        Connects to MongoDB, reads ``activity_ids.txt``, and writes one CSV
        file per actor id in the current working directory.
    """
    # Output schema for each per-actor CSV.
    columns = ['ip', 'count']

    # MongoDB connection info.
    # NOTE(review): hard-coded placeholder credentials — move to env vars
    # or a config file before real use.
    mongo_host = 'xxxx'
    mongo_port = 8030
    mongo_user = 'root'
    mongo_password = 'xxxxxx'
    escaped_password = quote_plus(mongo_password)
    escaped_host = quote_plus(mongo_host)

    # Connect to MongoDB.
    # Fix: the original escaped the host but then interpolated the
    # unescaped value; use the escaped form in the URI.
    connection_string = (
        f'mongodb://{mongo_user}:{escaped_password}@{escaped_host}:{mongo_port}/admin'
    )
    client = MongoClient(connection_string)

    # Target database; the collection is loop-invariant, so fetch it once.
    db = client[database_name]
    collection = db[collection_name]

    # Read actorId values from file, skipping blank lines.
    with open('activity_ids.txt', 'r', encoding='utf-8') as file:
        actor_ids = [line.strip() for line in file if line.strip()]

    for actor_id in actor_ids:
        # Match this actor's events within the fixed time window.
        query = {
            "add_time": {
                "$gte": "2024-04-17 15:00:00",
                "$lte": "2024-04-24 19:00:00"
            },
            "actorId": f"{actor_id}"
        }
        # Group by ip, count occurrences, sort by count descending.
        pipeline = [
            {"$match": query},
            {"$group": {"_id": "$ip", "count": {"$sum": 1}}},
            {"$sort": {"count": -1}},
            {"$project": {"_id": 0, "ip": "$_id", "count": 1}}
        ]

        try:
            # Fix: DataFrame.append was removed in pandas 2.0 — collect the
            # rows and build the frame in one shot instead.
            rows = [
                {'ip': result['ip'], 'count': result['count']}
                for result in collection.aggregate(pipeline)
            ]
        except OperationFailure as op_err:
            # Fix: the original f-string had no braces and printed the
            # literal text "op_err"; also narrow the caught exception.
            print(f"exception due to: {op_err}")
            continue

        # Fix: the original computed a per-actor filename but never wrote
        # it, and accumulated every actor's rows into one growing frame.
        # Write one fresh CSV per actor instead.
        df = pd.DataFrame(rows, columns=columns)
        df.to_csv(f"{actor_id}.csv", index=False)

if __name__ == "__main__":
    # Placeholder database and collection names for the aggregation run.
    target_db = "xxx"
    target_collection = "xxxx"
    get_database_info(target_db, target_collection)