import pickle
import random
import logging

from google.appengine.api import memcache

def delete(key):
  """Remove a chunked value and its index entry from memcache.

  Returns True if an index entry existed under `key` (and a batch delete
  of the chunks plus the index was issued), False if nothing was stored.
  """
  index = memcache.get(key)
  if index is None:
    return False
  # Drop every chunk entry and the index entry itself in one batch call.
  memcache.delete_multi(index + [key])
  return True

def set(key, value):
  """Store a value that may exceed memcache's single-entry size limit.

  The value is split into chunks of at most memcache.MAX_VALUE_SIZE each;
  every chunk is written under a derived key, and the list of chunk keys
  is stored under `key` itself as an index (read back by get()).

  Returns True on success, False if any memcache write failed.  On
  failure, chunks already written are deleted so they do not linger as
  unreachable orphans.

  NOTE: the function name intentionally shadows the builtin `set` to
  mirror the memcache API surface.
  """
  #pickled_value = pickle.dumps(value)
  pickled_value = value

  # delete previous entity with the given key
  # in order to conserve available memcache space.
  delete(key)

  pickled_value_size = len(pickled_value)
  chunk_keys = []
  for pos in range(0, pickled_value_size, memcache.MAX_VALUE_SIZE):
    # TODO: use memcache.set_multi() for speedup, but don't forget
    # about batch operation size limit (32Mb currently).
    # Slicing clamps at the end of the value, so no explicit min() is
    # needed for the final (possibly short) chunk.
    chunk = pickled_value[pos:pos + memcache.MAX_VALUE_SIZE]

    # the pos is used for reliable distinction between chunk keys.
    # the random suffix is used as a counter-measure for distinction
    # between different values, which can be simultaneously written
    # under the same key.
    chunk_key = '%s%d%d' % (key, pos, random.getrandbits(31))

    if not memcache.set(chunk_key, chunk):
      # A chunk write failed: remove the chunks written so far so they
      # don't waste memcache space as orphans until eviction.
      memcache.delete_multi(chunk_keys)
      return False
    chunk_keys.append(chunk_key)

  logging.debug("Saving key %s to blobcache, split in %i chunks", key, len(chunk_keys))
  if not memcache.set(key, chunk_keys):
    # The index write failed: without it the chunks are unreachable by
    # get()/delete(), so drop them now.
    memcache.delete_multi(chunk_keys)
    return False
  return True


def get(key):
  """Fetch and reassemble a chunked value previously stored with set().

  Returns the concatenated value, or None when the index entry or any
  chunk is missing (e.g. the value was partially evicted).
  """
  index = memcache.get(key)
  if index is None:
    return None
  parts = []
  for part_key in index:
    # TODO: use memcache.get_multi() for speedup.
    # Don't forget about the batch operation size limit (currently 32Mb).
    part = memcache.get(part_key)
    if part is None:
      # A chunk was evicted; the value is unrecoverable.
      return None
    parts.append(part)
  blob = ''.join(parts)
  try:
    logging.debug("Retrieved key %s from blobcache, split in %i chunks", key, len(index))
    #return pickle.loads(blob)
    return blob
  except Exception:
    return None