#!/usr/bin/env python3

#
# https://eth.wiki/en/concepts/ethash/ethash
#
# This one points out some issues, but gets several things wrong, too:
# https://github.com/lukovkin/ethash
#
# The issues found in the lukovkin version:
#
# - The encode_int(nonce) does not produce the expected byte string of fixed
#   length 8. Our solution using "struct" should be better.
#
# - Target non-reversal is incorrect, given that we perform a string
#   comparison, which is big-endian.
#

#
# Requires
# python-pysha3
#
# Note: the Keccak hashes are called keccak_*, not sha3_*
# See also: https://pypi.org/project/pysha3/
#
# from hashlib import sha3_512 as sha3
import time
from functools import partial

import _pysha3 as sha3
import copy, struct
from random import randint

import multiprocessing as mp
import binascii

# ----- Definitions -----------------------------------------------------------


WORD_BYTES = 4  # bytes per 32-bit word
DATASET_BYTES_INIT = 2 ** 30  # bytes in dataset at genesis (1 GiB)
DATASET_BYTES_GROWTH = 2 ** 23  # dataset growth per epoch (8 MiB)
CACHE_BYTES_INIT = 2 ** 24  # bytes in cache at genesis (16 MiB)
CACHE_BYTES_GROWTH = 2 ** 17  # cache growth per epoch (128 KiB)
EPOCH_LENGTH = 30000  # blocks per epoch
MIX_BYTES = 128  # width of mix in bytes (two 64-byte hashes)
HASH_BYTES = 64  # hash length in bytes (keccak-512 digest size)
DATASET_PARENTS = 256  # number of parents of each dataset element
CACHE_ROUNDS = 3  # number of rounds in cache production
ACCESSES = 64  # number of accesses in hashimoto loop


# ----- Appendix --------------------------------------------------------------


def decode_int(s):
    """Interpret byte string *s* as a little-endian unsigned integer.

    An empty (or otherwise falsy) *s* decodes to 0.
    """
    return int.from_bytes(s, 'little') if s else 0


def encode_int(s):
    """Encode non-negative integer *s* as a minimal little-endian byte string.

    Inverse of decode_int (decode_int(encode_int(x)) == x). 0 encodes to b''.

    Fixes vs. the original:
    - odd-length hex was padded on the RIGHT (`a += '0'`), multiplying the
      value by 16; the pad nibble belongs on the left;
    - the byte reversal to little-endian was missing, although decode_int
      and serialize_hash/zpad (which pad high bytes on the right) treat
      words as little-endian;
    - the zero case returned the str '' instead of bytes.
    """
    if s == 0:
        return b''
    a = "%x" % s
    if len(a) % 2 != 0:
        a = '0' + a  # pad the high nibble, preserving the value
    return binascii.a2b_hex(a)[::-1]


def zpad(s, length):
    """Right-pad *s* with zero bytes up to *length* (no-op if already longer).

    Tolerates a falsy non-bytes *s* (e.g. the str '' encode_int may return
    for 0) by substituting empty bytes before concatenating.
    """
    pad = b'\x00' * max(0, length - len(s))
    return s + pad if s else pad


def serialize_hash(h):
    """Serialize a list of 32-bit words into one byte string, 4 bytes each."""
    return b''.join(zpad(encode_int(word), 4) for word in h)


def deserialize_hash(h):
    """Split byte string *h* into WORD_BYTES-sized chunks, decoded to ints."""
    offsets = range(0, len(h), WORD_BYTES)
    return [decode_int(h[off:off + WORD_BYTES]) for off in offsets]


def hash_words(h, sz, x):
    """Apply hash function *h* to *x* and return the digest as a word list.

    *x* may be raw bytes or a list of words (serialized first); *sz* is the
    digest size in bytes (kept for interface compatibility, unused here).
    """
    data = serialize_hash(x) if isinstance(x, list) else x
    return deserialize_hash(h(data))


# sha3 hash function, outputs 64 bytes

def sha3_512(x):
    """Keccak-512 of *x* (bytes or word list), returned as a list of 16 words.

    Note: this is the original Keccak, not the finalized SHA-3 padding.
    """
    def digest(v):
        assert type(v) == bytes, f'type of v:{type(v)}'
        return sha3.keccak_512(v).digest()

    return hash_words(digest, 64, x)


def sha3_256(x):
    """Keccak-256 of *x* (bytes or word list), returned as a list of 8 words.

    Fix vs. the original: the `assert type(x) == bytes` was removed because
    hashimoto() calls this with `s + cmix` — a list of words — which
    hash_words serializes itself; the assert made that call raise.
    """
    return hash_words(lambda v: sha3.keccak_256(v).digest(), 32, x)


def xor(a, b):
    """Bitwise XOR of two words; map()-friendly helper for mkcache."""
    return a ^ b


def isprime(x):
    """Trial-division primality test.

    Fixes vs. the original:
    - `range(2, int(x ** 0.5))` excluded the square root itself, so perfect
      squares of primes (9, 25, 49, ...) were reported prime;
    - values below 2 (0, 1, negatives) now correctly return False.
    """
    if x < 2:
        return False
    for i in range(2, int(x ** 0.5) + 1):
        if x % i == 0:
            return False
    return True


# ----- Parameters ------------------------------------------------------------


def get_cache_size(block_number):
    """Byte size of the cache for *block_number*'s epoch.

    Grows linearly per epoch, then steps down until the number of 64-byte
    rows is prime (reduces cyclic access patterns).

    Fix vs. the original: `sz / HASH_BYTES` is float division under
    Python 3; primality must be tested on the integer row count, so use
    floor division.
    """
    sz = CACHE_BYTES_INIT + \
         CACHE_BYTES_GROWTH * (block_number // EPOCH_LENGTH)
    sz -= HASH_BYTES
    while not isprime(sz // HASH_BYTES):
        sz -= 2 * HASH_BYTES
    return sz


def get_full_size(block_number):
    """Byte size of the full dataset for *block_number*'s epoch.

    Grows linearly per epoch, then steps down until the number of 128-byte
    mix rows is prime.

    Fix vs. the original: `sz / MIX_BYTES` is float division under
    Python 3; use floor division so isprime sees an integer.
    """
    sz = DATASET_BYTES_INIT + \
         DATASET_BYTES_GROWTH * (block_number // EPOCH_LENGTH)
    sz -= MIX_BYTES
    while not isprime(sz // MIX_BYTES):
        sz -= 2 * MIX_BYTES
    return sz


# ----- Cache Generation ------------------------------------------------------


def mkcache(cache_size, seed):
    """Generate the light-client cache: *cache_size* bytes as 64-byte rows.

    First fills sequentially by repeated keccak-512, then applies
    CACHE_ROUNDS passes of the low-round randmemohash mix.
    """
    rows = cache_size // HASH_BYTES

    # Sequential fill: each row is the keccak-512 of the previous one.
    o = [sha3_512(seed)]
    while len(o) < rows:
        print('cache process 1 {}/{}'.format(len(o), rows))
        o.append(sha3_512(o[-1]))

    # Low-round randmemohash: mix each row with its predecessor and a
    # pseudo-randomly chosen partner row.
    for rnd in range(CACHE_ROUNDS):
        print('cache process 2 {}/{}'.format(rnd, CACHE_ROUNDS))
        for idx in range(rows):
            print('cache process 2.1 {}/{}'.format(idx, rows))
            partner = o[o[idx][0] % rows]
            prev = o[(idx - 1 + rows) % rows]
            o[idx] = sha3_512([a ^ b for a, b in zip(prev, partner)])

    return o


# ----- Data aggregation function ---------------------------------------------


FNV_PRIME = 0x01000193


def fnv(v1, v2):
    """One round of the FNV-1-style 32-bit mixing function used by ethash."""
    mixed = (v1 * FNV_PRIME) ^ v2
    return mixed % 2 ** 32


# ----- Full dataset calculation ----------------------------------------------


def calc_dataset_item(cache, i):
    """Compute dataset row *i* (a 16-word list) from the cache.

    Seeds a mix from cache row i, then folds in DATASET_PARENTS
    pseudo-randomly selected cache rows via fnv, and hashes the result.
    """
    rows = len(cache)
    words = HASH_BYTES // WORD_BYTES  # 16 words per row
    # Seed the mix with a copy of cache row i, perturbed by the index.
    mix = list(cache[i % rows])
    mix[0] ^= i
    mix = sha3_512(mix)
    # Fold in DATASET_PARENTS cache rows chosen by the fnv index chain.
    for parent in range(DATASET_PARENTS):
        row = fnv(i ^ parent, mix[parent % words]) % rows
        mix = [fnv(a, b) for a, b in zip(mix, cache[row])]
    return sha3_512(mix)


def _calc(i, cache, length):
    """Progress-printing wrapper around calc_dataset_item for Pool.map."""
    progress = i * 10000 // length  # basis points of completion
    print('calc {}/{} {}'.format(i, length, progress))
    return calc_dataset_item(cache, i)


def calc_dataset(full_size, cache, processes=12):
    """Compute the full dataset (full_size // HASH_BYTES rows) in parallel.

    cache     -- the epoch cache from mkcache
    processes -- worker-process count (was a hard-coded 12; now a
                 backward-compatible parameter so callers can match their
                 core count)

    Returns the list of dataset rows, each a 16-word list.
    """
    length = full_size // HASH_BYTES
    calc = partial(_calc, cache=cache, length=length)

    with mp.Pool(processes) as p:
        return p.map(calc, range(length))


# ----- Main Loop -------------------------------------------------------------


def hashimoto(header, nonce, full_size, dataset_lookup):
    """Core ethash loop: seed from (header || nonce), repeatedly fnv-mix in
    pseudo-randomly selected dataset rows, then compress the mix.

    header         -- block header hash (bytes)
    nonce          -- 64-bit integer nonce (packed little-endian)
    full_size      -- dataset size in bytes
    dataset_lookup -- callable mapping a row index to a 16-word list

    Returns {"mix digest": bytes, "result": bytes}.

    Fix vs. the original: every size computation here used `/`, which is
    float division in Python 3 — `range(MIX_BYTES / HASH_BYTES)` raised
    TypeError and `p` became a float index. All replaced with `//`.
    """
    n = full_size // HASH_BYTES          # number of dataset rows
    w = MIX_BYTES // WORD_BYTES          # words per mix (32)
    mixhashes = MIX_BYTES // HASH_BYTES  # hashes per mix (2)

    # combine header+nonce into a 64 byte seed
    s = sha3_512(header + struct.pack("<Q", nonce))

    # start the mix with replicated s
    mix = []
    for _ in range(mixhashes):
        mix.extend(s)

    # mix in random dataset nodes
    for i in range(ACCESSES):
        # Row index of the first half of the page; always a multiple of
        # mixhashes so the two fetched rows are adjacent.
        p = fnv(i ^ s[0], mix[i % w]) % (n // mixhashes) * mixhashes
        newdata = []
        for j in range(mixhashes):
            newdata.extend(dataset_lookup(p + j))
        mix = list(map(fnv, mix, newdata))

    # compress mix: fold every 4 words into 1
    cmix = []
    for i in range(0, len(mix), 4):
        cmix.append(fnv(fnv(fnv(mix[i], mix[i + 1]), mix[i + 2]), mix[i + 3]))
    return {
        "mix digest": serialize_hash(cmix),
        "result": serialize_hash(sha3_256(s + cmix))
    }


def hashimoto_light(full_size, cache, header, nonce):
    """Hashimoto for light clients: dataset rows are derived on the fly
    from the cache instead of being precomputed."""
    def lookup(index):
        return calc_dataset_item(cache, index)

    return hashimoto(header, nonce, full_size, lookup)


def hashimoto_full(full_size, dataset, header, nonce):
    """Hashimoto backed by a fully precomputed in-memory dataset."""
    return hashimoto(header, nonce, full_size, dataset.__getitem__)


# ----- Mining ----------------------------------------------------------------

# We break "mine" down into smaller parts, to have better control over the
# mining process.

def get_target(difficulty):
    """64-byte mining target for *difficulty*: 2**256 // difficulty, padded
    and byte-reversed so a plain bytes comparison works big-endian."""
    raw = encode_int(2 ** 256 // difficulty)
    return zpad(raw, 64)[::-1]


def random_nonce():
    """Uniform random 64-bit starting nonce.

    Fix vs. the original: randint's upper bound is inclusive, so
    `randint(0, 2 ** 64)` could return 2**64, which does not fit the
    `struct.pack("<Q", nonce)` used in hashimoto().
    """
    return randint(0, 2 ** 64 - 1)


def mine(full_size, dataset, header, difficulty, nonce):
    """Increment *nonce* until the hashimoto result meets the target.

    Fix vs. the original: hashimoto_full returns a dict, and comparing a
    dict against bytes raises TypeError under Python 3. The comparison must
    be on the "result" digest; get_target is already byte-reversed so a
    lexicographic bytes comparison acts big-endian (see file header note).
    """
    target = get_target(difficulty)
    while hashimoto_full(full_size, dataset, header, nonce)["result"] > target:
        nonce = (nonce + 1) % 2 ** 64
    return nonce


# ----- Defining the Seed Hash ------------------------------------------------


def get_seedhash(block):
    """Seed hash for *block*'s epoch: keccak-256 applied once per elapsed
    epoch to 32 zero bytes."""
    seed = b'\x00' * 32
    for _ in range(block // EPOCH_LENGTH):
        seed = serialize_hash(sha3_256(seed))
    return seed


class Ethminer:
    """Holds the per-epoch cache and dataset and mines against them."""

    def __init__(self):
        # -1 marks "no epoch data generated yet".
        self.block_num = -1
        self.data = []
        self.cache = []
        self.cache_size = 0
        self.full_size = 0

    def generate_data(self, block_num, seed):
        """(Re)build cache and dataset for *block_num*.

        Returns True when the data for this block is already current
        (nothing done), False after a fresh generation.
        """
        if block_num == self.block_num:
            return True
        print('start generate data')
        st = time.time()
        self.block_num = block_num
        self.cache_size = get_cache_size(block_num)
        self.full_size = get_full_size(block_num)
        self.cache = mkcache(self.cache_size, str.encode(seed))
        print('start generate data time:{}'.format((time.time() - st) // 1))
        self.data = calc_dataset(self.full_size, self.cache)
        print('generate_data success use time:{}'.format((time.time() - st) // 1))
        return False

    def mine(self, start_nonce, difficulty, header):
        """Search nonces from *start_nonce*; returns the winning nonce."""
        return mine(self.full_size, self.data, header, difficulty, start_nonce)


if __name__ == '__main__':

    # Debug scaffolding: generate epoch-1 data via Ethminer, then hard-stop.
    # Everything after the `assert 0` is the original CLI flow and is
    # currently dead code.
    miner = Ethminer()
    seed = r'd057370840be91b808079bc1e4e585f007b5aa3899720a5c2742b86848e4d4c3'
    miner.generate_data(1, seed)
    assert 0  # NOTE(review): deliberate halt for debugging — remove to run the CLI below
    import sys

    #
    # usage: ethash.py epoch 0xheaderhash 0xnonce  (real-life mode)
    #        ethash.py dag-lines 0xheaderhash 0xnonce  (mixone mode)
    #
    # NOTE(review): argv is overridden here, so real command-line arguments
    # are ignored even if execution ever reaches this point.
    sys.argv = ['ethash.py', '1', '0xc7b038b5877850d0073b9fe568f9e24c55d552858c9a488071ea7d842ea3be8d', '0xnonce']

    if len(sys.argv) != 4:
        print(sys.stderr, "usage: ", sys.argv[0], "epoch|dag-lines", "0xheaderhash", "0xnonce")
        sys.exit(1)

    # do exactly what mixone does: a tiny one-row cache sized by dag-lines
    if int(sys.argv[1]) > 1000:
        seed = deserialize_hash(get_seedhash(0))
        cache = mkcache(HASH_BYTES, seed)
        dag_bytes = int(sys.argv[1]) * MIX_BYTES
    else:
        # real-life mode: argv[1] is an epoch number
        block = int(sys.argv[1]) * EPOCH_LENGTH
        seed = deserialize_hash(get_seedhash(block))
        print("seed", "%064x" % decode_int(serialize_hash(seed)[::-1]))
        cache = mkcache(get_cache_size(block), seed)
        dag_bytes = get_full_size(block)
    # Header hash arrives big-endian on the CLI; reverse and left-pad to 32 bytes.
    hdr = encode_int(int(sys.argv[2], base=16))[::-1]
    hdr = b'\x00' * (32 - len(hdr)) + hdr
    nonce = int(sys.argv[3], base=16)
    hash = hashimoto_light(dag_bytes, cache, hdr, nonce)
    print("cmix", "%064x" % decode_int(hash["mix digest"][::-1]))
    print("res ", "%064x" % decode_int(hash["result"][::-1]))
