from collections import OrderedDict
from typing import Optional

# Bencode token markers (BEP 3): dicts, lists and integers are introduced by
# a single type byte and terminated by an 'e' byte; strings are
# length-prefixed, with ':' separating the length from the payload.
TOKEN_DICT             = b'd'
TOKEN_LIST             = b'l'
TOKEN_INTEGER          = b'i'
TOKEN_END              = b'e'
TOKEN_STRING_SEPARATOR = b':'

class Decoder:
    """Decode a bencoded byte string into Python objects.

    Integers decode to ``int``, strings to ``bytes`` (bencode strings are
    raw byte strings), lists to ``list`` and dictionaries to
    ``OrderedDict`` (preserving the on-wire key order).
    """

    def __init__(self, data: bytes) -> None:
        if not isinstance(data, bytes):
            raise TypeError('Arg "data" must be type bytes')
        self._data = data
        self._index = 0  # current read offset into self._data

    def decode(self):
        """Decode and return the next value at the current position.

        Raises:
            EOFError: the data ended before a complete value was read.
            RuntimeError: the next byte is not a valid bencode token.
        """
        c = self._peek()

        if c is None:
            raise EOFError('EOF of data')
        elif c == TOKEN_INTEGER:
            self._consume()
            return self._decode_int()
        elif c == TOKEN_DICT:
            self._consume()
            return self._decode_dict()
        elif c == TOKEN_LIST:
            self._consume()
            return self._decode_list()
        elif c in b'0123456789':
            # A digit begins a length-prefixed string; there is no marker
            # byte to consume for strings.
            return self._decode_string()
        else:
            raise RuntimeError(f'Invalid token at {self._index}')

    def _peek(self) -> Optional[bytes]:
        """Return the next byte without advancing, or None at end of data."""
        if self._index >= len(self._data):
            return None
        return self._data[self._index : self._index + 1]

    def _consume(self) -> None:
        """Advance past the current byte."""
        self._index += 1

    def _read(self, length: int) -> bytes:
        """Read exactly ``length`` bytes and advance past them.

        Raises:
            EOFError: fewer than ``length`` bytes remain. (The original
                silently returned a short slice here.)
        """
        res = self._data[self._index : self._index + length]
        if len(res) < length:
            raise EOFError(f'Unexpected EOF, wanted {length} bytes')
        self._index += length
        return res

    def _read_until(self, token: bytes) -> bytes:
        """Read up to (excluding) the next ``token`` byte; consume the token.

        Raises:
            EOFError: the token does not occur in the remaining data
                (truncated input), reported consistently with decode().
        """
        try:
            occurrence = self._data.index(token, self._index)
        except ValueError:
            raise EOFError(
                f'Token {token!r} not found after index {self._index}'
            ) from None
        res = self._data[self._index : occurrence]
        self._index = occurrence + 1
        return res

    def _decode_int(self) -> int:
        # 'i' marker already consumed; digits run up to the 'e' terminator.
        return int(self._read_until(TOKEN_END))

    def _decode_string(self) -> bytes:
        # Format: <decimal length>:<payload>. Payload stays raw bytes.
        length = int(self._read_until(TOKEN_STRING_SEPARATOR))
        return self._read(length)

    def _decode_list(self) -> list:
        # 'l' marker already consumed; values follow until the 'e' terminator.
        res = []
        while self._peek() != TOKEN_END:
            res.append(self.decode())
        self._consume()  # eat END token
        return res

    def _decode_dict(self) -> OrderedDict:
        # 'd' marker already consumed; key/value pairs follow until 'e'.
        # Keys are always bencode strings, hence _decode_string.
        res = OrderedDict()
        while self._peek() != TOKEN_END:
            key = self._decode_string()
            value = self.decode()
            res[key] = value
        self._consume()  # eat END token
        return res
    
class Encoder:
    """Encode Python objects into bencoded bytes.

    Supported types: ``str`` (emitted as UTF-8), ``bytes``, ``int``,
    ``list`` and ``dict`` (including ``OrderedDict``). Booleans raise
    NotImplementedError, matching the original behaviour (bencode has no
    boolean type; ``type(True) == int`` was False in the old dispatch).
    """

    def __init__(self, data) -> None:
        self._data = data

    def encode(self) -> bytes:
        """Return the bencoded representation of the wrapped object."""
        return self.encode_next(self._data)

    def encode_next(self, data) -> bytes:
        """Encode ``data`` by dispatching on its runtime type.

        Raises:
            NotImplementedError: ``data`` is a bool or an unsupported type.
        """
        # bool subclasses int, so it must be rejected before the int check
        # to preserve the original behaviour.
        if isinstance(data, bool):
            raise NotImplementedError()
        if isinstance(data, str):
            return self._encode_string(data)
        if isinstance(data, int):
            return self._encode_int(data)
        if isinstance(data, list):
            return self._encode_list(data)
        if isinstance(data, dict):  # covers OrderedDict as well
            return self._encode_dict(data)
        if isinstance(data, bytes):
            return self._encode_bytes(data)
        raise NotImplementedError()

    def _encode_string(self, data: str) -> bytes:
        # Encode to UTF-8 first: the length prefix must count the encoded
        # *bytes*, not the characters. The original used len(data), which
        # produced an invalid length prefix for any non-ASCII string.
        return self._encode_bytes(data.encode('utf-8'))

    def _encode_bytes(self, data: bytes) -> bytes:
        # <decimal byte length>:<payload>
        return str(len(data)).encode('ascii') + TOKEN_STRING_SEPARATOR + data

    def _encode_int(self, data: int) -> bytes:
        # i<decimal digits>e
        return TOKEN_INTEGER + str(data).encode('ascii') + TOKEN_END

    def _encode_list(self, data: list) -> bytes:
        # l<item><item>...e
        return TOKEN_LIST + b''.join(self.encode_next(item) for item in data) + TOKEN_END

    def _encode_dict(self, data: dict) -> bytes:
        # d<key><value><key><value>...e — pairs in the dict's iteration order.
        parts = bytearray(TOKEN_DICT)
        for key, value in data.items():
            parts += self.encode_next(key)
            parts += self.encode_next(value)
        parts += TOKEN_END
        return bytes(parts)
