import array
import collections
import datetime
import hashlib
import io
import os
import shutil
import struct
import simplejson
import socket
import tempfile
import threading
import time

import duplicity.librsync as librsync

from keyczar.keys import GenKey
from keyczar.keyinfo import RSA_PRIV
from tornado.httpserver import HTTPServer
from tornado.iostream import IOStream

from observer import IN_CREATE
from observer import IN_MODIFY
from observer import Observer
from utils import logger

from keyczar.keys import AesKey
from keyczar.keys import HmacKey
from keyczar import util

# AES key size in bits used for the symmetric peer-to-peer channel
aes_size = 256

# event type codes carried in the event header
EV_TSTAMP = 0x00000001
EV_CREATE = 0x00000002
EV_MODIFY = 0x00000004
# seconds before a received timestamp is considered expired
TIMEOUT = 60

def now():
    """Return the current local time as seconds since the epoch.

    Sub-second precision is dropped because the value goes through a
    ``timetuple`` round-trip.
    """
    current = datetime.datetime.now()
    return time.mktime(current.timetuple())

def generate_timestamp():
    """Return a millisecond timestamp for outgoing messages.

    A small offset (up to 6 seconds, derived from the process clock) is
    subtracted first -- presumably to decorrelate timestamps issued in
    the same wall-clock second; verify against the protocol's intent.
    """
    offset = (time.clock() % 60) / 10
    return int(1000 * (now() - offset))

def validate_timestamp(timestamp, timestamps, timeout):
    """Validate a millisecond ``timestamp`` against replay and expiry.

    :param timestamp: millisecond timestamp received from a peer.
    :param timestamps: set of previously seen timestamps (updated in place).
    :param timeout: maximum allowed age in seconds.
    :raises ValueError: if the timestamp was already seen (replay) or is
        older than ``timeout`` seconds.
    """
    if timestamp in timestamps:
        raise ValueError("Repeat message.")
    d = abs(now() - timestamp/1000.0)
    # bug fix: honor the ``timeout`` argument; previously the module-level
    # TIMEOUT constant was used, leaving the parameter dead
    if d > timeout:
        raise ValueError(
            "Timestamp expired by %d seconds." % (d-timeout))
    timestamps.add(timestamp)

def secure_ident(ident, public_key):
    """Return a SHA-1 digest binding ``ident`` to ``public_key.hash``."""
    digest = hashlib.sha1()
    digest.update(ident)
    digest.update(public_key.hash)
    return digest.digest()

def create(reader, uri, size, mtime):
    """Yield ``(EV_CREATE, metadata, chunk)`` events for a new file.

    :param reader: buffered reader positioned at the start of the file.
    :param uri: share-relative identifier of the file.
    :param size: total number of bytes expected from ``reader``.
    :param mtime: modification time to record in the metadata.
    """
    metadata = simplejson.dumps({
        'uri': uri,
        'size': size,
        'mtime': mtime,
        })

    read = 0
    while read < size:
        chunk = reader.read(librsync.blocksize)
        if not chunk:
            # bug fix: premature EOF (file truncated after ``size`` was
            # sampled) previously caused an infinite loop of empty reads
            break
        read += len(chunk)
        yield EV_CREATE, metadata, chunk

def modify(chunks, uri, size, stime, mtime):
    """Yield ``(EV_MODIFY, metadata, chunk)`` events, one per delta chunk.

    ``chunks`` yields ``(bytes_read, delta_chunk)`` pairs; the running
    byte count is embedded in each event's metadata.
    """
    for offset, payload in chunks:
        meta = simplejson.dumps({
            'uri': uri,
            'size': size,
            'read': offset,
            'stime': stime,
            'mtime': mtime,
            })
        yield EV_MODIFY, meta, payload

class Operator(threading.Thread):
    """Peer to peer operator.

    Manages two-way secure synchronization between two peers.

    When a secure connection has been established, each party will
    send the most recent timestamp of the relevant shares.
    """

    # counters of encrypted messages sent and received on this connection
    sent = 0
    recv = 0

    # tornado IOStream attached by ``run``; ``None`` until then
    _stream = None
    # operation generator currently being drained, or ``None``
    _active = None

    # format: event_type, event_size
    _event_header = struct.Struct('ii')

    # format: timestamp, length, signature
    _auth_header = struct.Struct('li%ds' % (aes_size//8))

    def __init__(self, handler, key):
        # handler: called as ``handler(op, data, **metadata)`` for each
        # received event; key: symmetric AES key for this connection
        threading.Thread.__init__(self)
        self._handler = handler
        self._key = key
        # set once ``run`` has attached a stream (see ``wait``)
        self._event = threading.Event()
        self._in = collections.deque()
        self._out = collections.deque()
        # pending operations keyed by uri; a later notify for the same
        # uri replaces the queued operation
        self._operations = {}
        # timestamps already seen, used to reject replayed messages
        self._timestamps = set()

    @property
    def closed(self):
        # true when no stream is attached or the stream has been closed
        return self._stream is None or self._stream.closed()

    def notify(self, uri, operation):
        # queue ``operation`` (an event generator) for ``uri`` and try to
        # dispatch immediately
        self._operations[uri] = operation
        self._maybe_dispatch()

    def run(self, stream):
        # NOTE(review): overrides ``threading.Thread.run`` with an
        # incompatible signature and is invoked directly rather than via
        # ``start()`` -- confirm the Thread base class is actually used
        self._stream = stream
        self._read()
        self._event.set()
        self._maybe_dispatch()

    def stop(self):
        # close the underlying stream; ``closed`` becomes true
        assert self._stream is not None
        self._stream.close()

    def wait(self, timeout):
        # block until ``run`` has attached a stream, or timeout elapses
        self._event.wait(timeout)

    def _dispatch(self, operation):
        # pull the next event from the generator and transmit it; raises
        # StopIteration when exhausted (handled by ``_maybe_dispatch``)
        op, metadata, data = operation.next()
        self._send(op, metadata, data)

    def _maybe_dispatch(self):
        # advance the active operation, or start the next queued one;
        # no-op until a stream has been attached
        if self._stream is None:
            return

        operation = self._active

        if operation is None:
            if not self._operations:
                return
            # arbitrary pending operation becomes the active one
            operation = self._active = self._operations.popitem()[1]

        try:
            self._dispatch(operation)
        except StopIteration:
            self._active = None

    def _read(self):
        """Read encrypted data from remote peer.

        Reading proceeds via nested IOStream callbacks: first the
        fixed-size auth header, then the variable-size encrypted body;
        ``_read`` re-arms itself after each complete message.
        """

        def unpack(auth):
            timestamp, length, signature = self._auth_header.unpack(auth)

            # validate timestamp
            validate_timestamp(timestamp, self._timestamps, TIMEOUT)

            def decrypt(encrypted):
                # verify signature
                self._key.hmac_key.Verify(encrypted, signature)

                # decrypt message
                body = self._key.Decrypt(encrypted)
                op, es = self._event_header.unpack_from(body)
                size = self._event_header.size
                metadata = body[size:size+es]
                kwargs = simplejson.loads(metadata)
                data = body[size+es:]

                # dispatch and loop
                self.recv += 1
                self._handler(op, data, **kwargs)
                self._maybe_dispatch()
                self._read()

            self._stream.read_bytes(length, decrypt)
        self._stream.read_bytes(self._auth_header.size, unpack)

    def _send(self, op, metadata, data):
        """Send encrypted message to remote peer."""

        # event header
        header = self._event_header.pack(op, len(metadata))

        # encrypted message
        msg = self._key.Encrypt(header+metadata+data)

        # authorization for this message
        signature = self._key.hmac_key.Sign(msg)
        auth = self._auth_header.pack(
            generate_timestamp(), len(msg), signature)

        # dispatch the next queued event once the write completes
        self._stream.write(auth+msg, self._maybe_dispatch)
        self.sent += 1

class Peer(Observer):
    """Peer.

    This class represents a peer on the network. Typically each
    computer user will run exactly one peer.

    The ``shares`` parameter is a dictionary that maps share
    identification string to a list of peers.

    When local changes happen in one of our shares, we broadcast these
    to the list of peers for that share. Conversely, if an update
    happens on a remote peer, we verify that said peer is included in
    the list for that share before we accept their update.

    In the following, A and B are two peers.

    A sends an update notification of files or directories which have
    been changed since this timestamp. Note that these notification
    will carry no extra information unless the file is small in which
    case the complete file data will included (see the
    ``delta_threshold`` class attribute).

    B receives the update notifications will proceed to generate
    signatures of the mentioned files and transmit them.

    A creates a delta for each signature and transmits it. At the same
    time, he generates an updated signature and stores locally for
    later use.

    If, sometime later, A has updated a file to which he holds a
    stored last known signature for a particular peer, he will simply
    immediately transmit a delta that matches the signature. In the
    event that B changes his file during transmission, A will abort
    the transmission and instead accept changes from B.

    Note that file deltas are transmitted in chunks. The recipient
    immediately applies each received chunk to a temporary location
    and only overwrites the target upon completion.
    """

    # files at or below this size may be transmitted inline with the
    # notification (see class docstring)
    delta_threshold = 4096

    # read size used when streaming file data
    chunk_size = 16384

    # format: timestamp, 20-byte secure ident (SHA-1 digest)
    _auth_header = struct.Struct('l20s')

    def __init__(self, key, ident, host=None, port=None, path=None, shares=None):
        """Initialize peer.

        :param key: asymmetric key pair for this peer.
        :param ident: identification string; combined with the public
            key hash via ``secure_ident``.
        :param host: hostname peers connect to (defaults to localhost).
        :param port: TCP port to listen on / connect to.
        :param path: root directory for this peer's files.
        :param shares: mapping of share id to list of peers.
        """

        super(Peer, self).__init__()

        self.key = key
        self.path = path
        # bug fix: ``shares={}`` was a shared mutable default argument;
        # allocate a fresh dictionary per instance instead
        self.shares = {} if shares is None else shares
        self.signatures = {}

        self._host = host
        self._port = port
        # open Operator connections keyed by peer
        self._connections = {}
        # timestamps already seen (replay protection for CONNECT)
        self._timestamps = set()
        # path -> (mtime, signature) of last computed snapshot
        self._snapshots = {}
        self._ident = secure_ident(ident, key.public_key)
        # uri -> in-progress incoming transfer state
        self._transfers = {}
        # uri -> mtime (or None) of events to suppress (our own writes)
        self._ignore = {}

    def __call__(self, request):
        """Entry point for versioned HTTP API requests."""

        version, request.uri = request.uri.lstrip('/').split('/', 1)
        assert version == "1.0", "Unsupported API version: %s." % version

        try:
            # NOTE(review): ``self.handlers`` is not defined anywhere in
            # this class (only ``_event_handlers``/``_request_handlers``
            # are); presumably assigned externally -- confirm
            handler = self.handlers[request.headers.get('Content-Type')]
        except KeyError:
            request.write("HTTP/1.1 400 Bad request\r\n\r\n")
        else:
            stream = request.connection.stream
            body = request.body
            try:
                handler(self, body, stream)
            except Exception as e:
                message = str(e)
                request.write("HTTP/1.1 500 Internal Server Error\r\n")
                # bug fix: terminate the header line, send the blank
                # separator line, and write the error message (previously
                # the request body was echoed back instead)
                request.write("Content-Length: %d\r\n" % len(message))
                request.write("\r\n")
                request.write(message)

        # finish request if no callback was registered
        if not request.connection.stream.reading():
            request.finish()

    @property
    def by_ident(self):
        """Peer dictionary keyed by identification string."""

        peers = set()
        for members in self.shares.values():
            peers.update(members)
        return dict((peer._ident, peer) for peer in peers)

    @property
    def recv(self):
        # total messages received across all open connections
        return sum(op.recv for op in self._connections.values())

    @property
    def sent(self):
        # total messages sent across all open connections
        return sum(op.sent for op in self._connections.values())

    def create_directory(self, name):
        """Create and return a directory ``name`` under this peer's root."""
        assert os.path.exists(self.path)
        path = os.path.join(self.path, name)
        os.mkdir(path)
        return path

    def create_share(self, path, share_id):
        """Mark ``path`` as belonging to share ``share_id``."""
        # bug fix: close the marker file instead of leaking the handle
        with open(os.path.join(path, ".dropbox"), 'w') as f:
            f.write(share_id)

    def connect(self, peer, callback=None):
        """Connect to remote peer.

        To authenticate ourselves, we present a signed token ``(ident,
        timestamp)``. An encrypted peer-to-peer connection is then set
        up by encrypting an AES symmetric key.
        """

        connection = self._connections.get(peer)

        if connection is None or connection.closed:
            # fresh symmetric key per connection
            key = AesKey.Generate(aes_size)
            connection = self._connections[peer] = Operator(self._dispatch, key)

            # encryption setup: auth header followed by the AES and HMAC
            # key material encrypted with the remote peer's public key
            header = self._auth_header.pack(
                generate_timestamp(), self._ident) + \
                peer.key.Encrypt(key.key_bytes) + \
                peer.key.Encrypt(key.hmac_key.key_bytes)

            # authentication
            signature = self.key.Sign(header)

            # open socket
            assert peer._port is not None
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            s.connect((peer._host or "localhost", peer._port))
            stream = IOStream(s)

            def on_write():
                # start reading/dispatching once the request is sent
                connection.run(stream)
                if callback is not None:
                    callback(connection)

            # send connection request
            body = header + signature
            stream.write('CONNECT / HTTP/1.1\r\n')
            stream.write('Content-Length: %d\r\n' % len(body))
            stream.write('Connection: Keep-Alive\r\n')
            stream.write('\r\n')
            stream.write(body, on_write)
        else:
            if callback is not None:
                callback(connection)

        return connection

    def handle(self, request):
        """Dispatch an HTTP request by method."""
        handler = self._request_handlers.get(request.method)
        if handler is None:
            # bug fix: "Method Not Allowed" is status 405, not 403
            request.write("HTTP/1.1 405 Method Not Allowed\r\n\r\n")
            request.finish()
        else:
            handler(self, request)

    def notify(self, event):
        """Notify event.

        This method is responsible for dispatching an event to
        relevant subscribers.

        To illustrate the relative complexity, consider that a file
        can be created, modified, moved (from and to) and after some
        iterations, finally deleted, all before we transmit a single
        byte because the network is busy.
        """

        # if the mtime is ``None``, ignore event unconditionally
        mtime = self._ignore.get(event.uri, False)
        if mtime is None:
            return

        path = self.resolve(event.uri)

        if mtime is not False:
            # ignore file if not modified since the internal mtime
            if os.stat(path).st_mtime <= mtime:
                return

            # clear ignore flag
            del self._ignore[event.uri]

        if event.mask == IN_CREATE:
            # dispatch create file operation
            try:
                mtime = self.snapshot(path)
                size = os.path.getsize(path)
                f = io.open(path, 'rb')
            except OSError:
                return

            reader = io.BufferedReader(f, self.chunk_size)
            operation = create(reader, event.uri, size, mtime)
        elif event.mask == IN_MODIFY:
            # given the stored signature, compute a delta and transmit
            # along with the timestamp
            try:
                chunks, size, stored = self.delta(path)
                mtime = self.snapshot(path)
            except IOError:
                return

            operation = modify(chunks, event.uri, size, stored, mtime)
        else:
            raise NotImplementedError("Event mask: 0x%x" % event.mask)

        # broadcast to every peer registered for this share
        for peer in self.shares.get(event.share, ()):
            connection = self.connect(peer)
            connection.notify(event.uri, operation)

    def serve(self):
        """Start listening for incoming peer connections."""
        server = HTTPServer(self.handle)
        server.listen(self._port)

    def delta(self, path):
        """Return ``(chunks, size, stored)`` for ``path``.

        ``chunks`` is a generator of ``(bytes_read, delta_chunk)`` pairs
        computed against the stored signature; ``stored`` is the mtime
        that signature corresponds to.
        """

        stored, signature = self._snapshots.get(path, (0, 0))

        # NOTE(review): when no snapshot exists the signature defaults to
        # the integer 0, which is handed to new_deltamaker below --
        # confirm librsync tolerates that
        f = open(path, 'rb', librsync.blocksize)
        size = os.path.getsize(path)
        assert size
        maker = librsync._librsync.new_deltamaker(signature)

        def chunks():
            read = 0
            while read < size:
                outbuf = array.array('c')
                chunk = f.read(librsync.blocksize)
                eof = False
                assert chunk

                # feed the chunk through the delta maker until consumed
                while chunk:
                    eof, len_inbuf_read, cycle_out = maker.cycle(chunk)
                    outbuf.fromstring(cycle_out)
                    chunk = chunk[len_inbuf_read:]
                    read += len_inbuf_read

                if read == size:
                    # flush the delta maker at end of input
                    eof, len_inbuf_read, cycle_out = maker.cycle('')
                    assert len_inbuf_read == 0
                    outbuf.fromstring(cycle_out)

                yield read, outbuf.tostring()

        return chunks(), size, stored

    def snapshot(self, path):
        """Compute and cache an rsync signature for ``path``.

        The file is re-read until its mtime is stable across a full
        read, so the cached signature matches the bytes actually
        hashed. Returns the mtime the signature corresponds to.
        """

        before = os.path.getmtime(path)
        stored, signature = self._snapshots.get(path, (0, 0))

        if before != stored:
            while True:
                sig_gen = librsync.SigGenerator()
                stream = io.open(path, 'rb')
                # bug fix: ``self.buffer_size`` was never defined; use
                # the ``chunk_size`` class attribute
                with io.BufferedReader(stream, self.chunk_size) as f:
                    sig_gen.update(f.read())
                after = os.path.getmtime(path)
                if after == before:
                    # file unchanged during the read; signature is valid
                    # (the original always performed a redundant second
                    # read before accepting the signature)
                    break
                before = after

            self._snapshots[path] = after, sig_gen.getsig()

        return before

    def _connect(self, request):
        """Handle an incoming CONNECT: authenticate the remote peer and
        establish an encrypted connection."""

        body = request.body
        stream = request.connection.stream

        # unpack header; the trailing ``sig_size`` bytes are the RSA
        # signature over everything that precedes them
        timestamp, ident = self._auth_header.unpack_from(body)
        sig_size = self.key.size // 8
        keystring = body[self._auth_header.size:-sig_size]
        signature = body[-sig_size:]
        encrypted = body[:-sig_size]

        # validate timestamp
        validate_timestamp(timestamp, self._timestamps, TIMEOUT)

        # verify signature
        peer = self.by_ident[ident]
        if not peer.key.Verify(encrypted, signature):
            raise ValueError("Invalid signature.")

        # decrypt symmetric key; first half is the AES key, second half
        # the HMAC key
        assert len(keystring) % 2 == 0
        aes_bytes = self.key.Decrypt(keystring[:len(keystring)//2])
        hmac_bytes = self.key.Decrypt(keystring[len(keystring)//2:])
        hmac = HmacKey(util.Encode(hmac_bytes), size=aes_size)
        aes = AesKey(util.Encode(aes_bytes), hmac, size=aes_size)

        # set up connection, replacing any existing one for this peer
        connection = self._connections.pop(peer, None)
        if connection is not None:
            connection.stop()
        connection = self._connections[peer] = Operator(self._dispatch, aes)
        connection.run(stream)

    def _dispatch(self, event_type, body, **kwargs):
        # route a decrypted event to the matching handler
        handler = self._event_handlers[event_type]
        handler(self, body, **kwargs)

    def _create(self, chunk, uri, size, mtime):
        """Create file.

        As we receive data, we write to a temporary location. When all
        data is received, the file is moved to its actual location.
        """

        path = self.resolve(uri)

        f = self._transfers.get(uri)
        if f is None:
            # bug fix: open in binary mode for raw chunk data, matching
            # the temporary file used by ``_modify``
            f = self._transfers[uri] = open(path+".dropbox", 'wb')

        f.write(chunk)

        if f.tell() >= size:
            f.close()
            del self._transfers[uri]

            # maybe delete, then rename to target filename; events for
            # this uri are suppressed while we touch the file ourselves
            self._ignore[uri] = None
            if os.path.exists(path):
                os.unlink(path)
            os.rename(f.name, path)
            os.utime(path, (mtime, mtime))
            self._ignore[uri] = mtime

    def _modify(self, chunk, uri, size, read, stime, mtime):
        """File modification.

        As we receive chunks of file delta data, they're used to patch
        the original data to a file in a temporary location.
        """

        path = self.resolve(uri)

        if not os.path.exists(path):
            return

        # our copy is newer than the sender's; ignore the update
        if os.stat(path).st_mtime > mtime:
            return

        transfer = self._transfers.get(uri)
        if transfer is None:
            f = open(path+".dropbox", 'wb')
            p = librsync._librsync.new_patchmaker(open(path, 'rb'))
            transfer = self._transfers[uri] = f, p
        else:
            f, p = transfer

        eof = False

        while chunk and not eof:
            eof, len_inbuf_read, cycle_out = p.cycle(chunk)

            try:
                f.write(cycle_out)
            except IOError as e:
                del self._transfers[uri]
                logger.critical(e)
                return

            chunk = chunk[len_inbuf_read:]

        if f.tell() >= size:
            del self._transfers[uri]
            f.close()

            # suppress our own filesystem events while replacing the file
            self._ignore[uri] = None
            if os.path.exists(path):
                os.unlink(path)
            os.rename(f.name, path)
            # NOTE(review): atime is set to ``mtime`` and mtime to
            # ``stime`` -- confirm this ordering is intentional
            os.utime(path, (mtime, stime))
            self._ignore[uri] = stime

    # event-type -> unbound handler dispatch table
    _event_handlers = {
        EV_CREATE: _create,
        EV_MODIFY: _modify,
        }

    # HTTP-method -> unbound handler dispatch table
    _request_handlers = {
        'CONNECT': _connect
        }

def create_test_peer(ident, **kwargs):
    """Create a peer rooted in a fresh temporary directory.

    The directory is removed at interpreter exit. Bug fixes: the old
    implementation (a) created the directory at a NamedTemporaryFile's
    path after closing it, which is racy, and (b) assigned ``cleanup``
    to ``f.__del__`` on an instance, which is never invoked because
    special methods are looked up on the type -- so the directory was
    never removed.

    :param ident: identification string for the peer.
    :param kwargs: passed through to ``Peer``.
    """
    import atexit

    key = GenKey(RSA_PRIV, 768)
    path = os.path.realpath(tempfile.mkdtemp())

    def cleanup():
        shutil.rmtree(path, ignore_errors=True)
    atexit.register(cleanup)

    peer = Peer(key, ident, path=path, **kwargs)
    # expose the cleanup hook so tests can reclaim the directory early
    peer.cleanup = cleanup
    return peer


