from pymongo import MongoClient
import subprocess
import os
from collections import defaultdict

from files import save_obj


def local_execute(command):
    """Run *command* in a local shell and return its full output.

    Parameters
    ----------
    command : str
        Shell command line (executed with ``shell=True`` — only pass
        trusted, internally-built commands).

    Returns
    -------
    tuple[str, str]
        ``(stdout, stderr)`` of the finished process, decoded as UTF-8.

    Note: the previous implementation called ``readline()`` on each pipe,
    which captured only the first line of output and could deadlock if the
    child filled the stderr pipe buffer before stdout was read.
    ``communicate()`` drains both pipes concurrently and waits for the
    process to exit.
    """
    with subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, encoding="utf-8") as process:
        stdout, stderr = process.communicate()
    return stdout, stderr


def get_port(nodes, docker_port):
    """Return the host-side port Docker mapped to *docker_port* for the
    container of the first node in *nodes*.

    NOTE(review): assumes ``nodes[0]`` is the primary — the original
    carries the same TODO to detect it properly instead.
    """
    primary = nodes[0]  # default TODO: TO detect instead
    command = "docker port jepsen-{} | grep {}".format(primary, docker_port)
    stdout, stderr = local_execute(command)
    # stdout looks like '27017/tcp -> 0.0.0.0:32917\n'; take the part
    # after '->', then the number after the last ':'.
    mapping = stdout.strip('\n').split('->')[1]
    host_port = mapping.strip().split(':')[1]
    return int(host_port)


def get_shards(isSharded=True):
    """Return the cluster topology and the in-container mongod port.

    Parameters
    ----------
    isSharded : bool
        True for the two-shard layout, False for a single replica set.

    Returns
    -------
    tuple[dict[str, list[str]], int]
        Mapping of shard name to its member node names, and the port the
        mongod listens on inside the container.
    """
    if not isSharded:
        return {"shard1": ["n1", "n2", "n3"]}, 27017
    topology = {
        "shard1": ["n4", "n5", "n6"],
        "shard2": ["n7", "n8", "n9"],
    }
    return topology, 27018


if __name__ == '__main__':

    # Entry point: pull transaction-related oplog entries from every
    # shard, group them per (lsid, txnNumber), reconstruct committed
    # transactions in commit-timestamp order, and dump the raw oplogs
    # to disk for later analysis.
    shards, local_port = get_shards(isSharded=True)

    time_oplogs = {}  # oplog 'ts' timestamp -> oplog document
    oplogs_by_shard = {}  # shard name -> list of raw transaction oplogs
    # Two-level index: lsid (session id, stringified) -> {txnNumber: [oplogs]}
    lsid_txnNumber_oplogs = dict()

    # Only oplog entries that belong to a session transaction carry
    # both 'txnNumber' and 'lsid'.
    txn_filter = {'txnNumber': {'$exists': True}, 'lsid': {'$exists': True}}

    for shard, nodes in shards.items():
        # Connect through the host port Docker mapped for this shard.
        port = get_port(nodes, local_port)
        client = MongoClient(host="127.0.0.1", port=port)
        database = client.get_database('local')
        collection = database.get_collection('oplog.rs')
        oplogs = list(collection.find(txn_filter))

        oplogs_by_shard[shard] = oplogs

        print("In {}, there are {} oplogs about transaction".format(shard, len(oplogs)))

        for oplog in oplogs:
            timestamp = oplog['ts']  # NOTE(review): unused local — time_oplogs below re-reads oplog['ts']
            time_oplogs[oplog['ts']] = oplog

            txnNumber = oplog['txnNumber']
            lsid = str(oplog['lsid'])  # stringified so it is usable as a dict key
            logs = lsid_txnNumber_oplogs.get(lsid)
            if logs is None:
                # First oplog for this session: create its per-txnNumber map.
                lsid_txnNumber_oplogs[lsid] = defaultdict(list)
                logs = lsid_txnNumber_oplogs[lsid]
            oplog['shard'] = shard  # tag provenance for later grouping
            logs[txnNumber].append(oplog)

    # Classify each (lsid, txnNumber) group by entry count:
    # 1 entry  -> single-shard (replica-set) transaction,
    # 4 entries -> distributed transaction (presumably prepare + commit
    #              on each of the two shards — TODO confirm),
    # anything else -> session-migration artifact.
    shard_oplogs = []
    replica_oplogs = []
    others = []  # sessionMigrateInfo, when data imbalance

    for lsid, logs in lsid_txnNumber_oplogs.items():
        for txnNumber, txn_oplogs in logs.items():
            if len(txn_oplogs) == 1:
                replica_oplogs.append(txn_oplogs)
            elif len(txn_oplogs) == 4:
                shard_oplogs.append(txn_oplogs)
            else:
                others.append(txn_oplogs)

    print("Replica Transactions: ", len(replica_oplogs))
    print("Shard Transactions: ", len(shard_oplogs))
    print("Session Migration Transaction: ", len(others))
    print("Transactions: ", len(replica_oplogs) + len(shard_oplogs) + len(others))

    # Process others
    # Salvage the applyOps entries from migration groups and treat each
    # one as a standalone replica-style transaction.
    for other in others:
        for oplog in other:
            if 'applyOps' in oplog['o'].keys():
                replica_oplogs.append([oplog])

    # Process replica set transaction
    replica_txns = []
    for logs in replica_oplogs:
        for log in logs:  # actually there is only one oplog(txn) in logs
            commitTimestamp = log['ts']
            txn = {
                'commitTs': commitTimestamp,
                'ops': [],
                'type': 'replica',
                'participants': log['shard'],
                'wall': log['wall']
            }
            # Each applyOps element is one statement of the transaction;
            # updates ('u') keep the key in 'o2' and the new value under
            # 'o.$set', other ops keep both under 'o'.
            applyOps = log['o']['applyOps']
            for op in applyOps:
                if op['op'] == 'u':
                    k = op['o2']['_id']
                    v = op['o']['$set']['value']
                else:
                    k = op['o']['_id']
                    v = op['o']['value']
                txn['ops'].append(('w', k, v))
            replica_txns.append(txn)

    # Process sharded transaction
    sharded_txns = []
    commitTsSet = set()
    for logs in shard_oplogs:
        # commit
        # All participants of one distributed txn must agree on a single
        # commitTimestamp.
        commitTimestamps = set()
        participants = []
        for log in filter(lambda x: 'commitTransaction' in x['o'].keys(), logs):
            commitTimestamps.add(log['o']['commitTimestamp'])
            commitTsSet.add(log['o']['commitTimestamp'])
            participants.append(log['shard'])

        if len(commitTimestamps) != 1:
            # NOTE(review): this break aborts processing of ALL remaining
            # sharded transactions, not just the bad one — confirm intended.
            print("[ERROR] Invalid commit timestamp")
            break

        commitTimestamp = commitTimestamps.pop()
        txn = {
            'commitTs': commitTimestamp,
            'ops': [],
            'type': 'sharded',
            'participants': participants
        }

        # prepare
        # Collect the write statements from each participant's prepare entry.
        for log in filter(lambda x: 'applyOps' in x['o'].keys(), logs):
            applyOps = log['o']['applyOps']
            for op in applyOps:
                if op['op'] == 'u':
                    k = op['o2']['_id']
                    v = op['o']['$set']['value']
                else:
                    k = op['o']['_id']
                    v = op['o']['value']
                txn['ops'].append(('w', k, v))

        sharded_txns.append(txn)

    # Merge both kinds and order globally by commit timestamp.
    txns = replica_txns.copy()
    txns.extend(sharded_txns)
    txns = sorted(txns, key=lambda x: x['commitTs'])

    for i in range(len(txns)):
        txn = txns[i]
        commitTs = txn['commitTs']
        # Flatten the timestamp into a plain (time, inc) tuple so it
        # serializes cleanly (presumably a bson.Timestamp — confirm).
        txn['commitTs'] = (commitTs.time, commitTs.inc)


    save_obj(obj=oplogs_by_shard, filepath="/home/young/Programs/Jepsen-Mongo-Txn/logs/oplogs.json")