#
# fusion.sync.sync_manager
#
# Copyright 2007 Helsinki Institute for Information Technology
# and the authors.
#
# Authors: Ken Rimey <rimey@hiit.fi>
#

# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"""
Synchronization manager

A sync manager is a container for sync sessions.
"""

import logging
from random import shuffle
from socket import gethostname, gethostbyname, inet_aton, error
from time import time
from traceback import print_exc

from fusion.sync.sync_session import SyncSession
from fusion.discovery.advertise import advertise, advertisement_threads
from fusion.discovery.discover import discover, discovery_threads

class SyncManager(object):
    """Container for sync sessions.

    Tracks the set of live SyncSession objects, periodically probes the
    discovery service for peers serving subscribed datasets, opens
    connections to promising peers, and advertises this node's own
    presence and subscriptions.
    """

    def __init__(self, db, stream_mgr, callback_callback,
                 listening_address=None):
        """
        db -- database object; must provide my_id and list_subscriptions().
        stream_mgr -- stream factory providing create_stream()/create_server().
        callback_callback -- scheduler that accepts a zero-argument
            callable to run later (see funcall); may be None.
        listening_address -- optional (host, port) pair; when given, a
            server is started immediately to accept incoming sessions.
        """
        self.sessions = set()
        self.db = db
        self.mgr = stream_mgr
        self.callback = callback_callback
        self.listening_address = listening_address
        self.start_time = time()
        self.last_connection_update = 0
        self.last_presence_update = 0
        self.last_advertisement_update = 0
        self.datasets_to_sync = self.generate_datasets_to_sync()
        self.squelch = {}       # map IID, peer ID, or address to time
        if self.listening_address:
            self.server = self.listen(self.listening_address)

    def add(self, session):
        """Register a session with this manager."""
        self.sessions.add(session)

    def discard(self, session):
        """Forget a session; no error if it was never registered."""
        self.sessions.discard(session)

    def connect(self, address, id=None, factory=SyncSession, **options):
        """Open an outgoing session to address.

        id -- expected peer ID, if known.
        factory -- session class to instantiate (defaults to SyncSession).
        Returns the new session after initiating the connection.
        """
        stream = self.mgr.create_stream()
        session = factory(stream, self.db, self,
                          my_id = self.db.my_id,
                          peer_id = id,
                          listening_address = self.listening_address,
                          **options)
        session.connect(address)
        return session

    def listen(self, address):
        """Start and return a server on address.

        Each accepted connection is wrapped in a new SyncSession, and the
        accept callback is re-armed so further connections are taken.
        """
        def handle_accept(stream_mgr, *args):
            stream = stream_mgr.create_stream(*args)
            SyncSession(stream, self.db, self,
                        my_id = self.db.my_id)
            server.accept(handle_accept)

        server = self.mgr.create_server()
        server.listen(address)
        server.accept(handle_accept)
        return server

    def update(self, delay=30):
        """Periodic tick: update all sessions and connections, and --
        once we have been up longer than delay seconds and are listening
        -- refresh our presence and advertisements."""
        for session in self.sessions:
            session.update()

        self.update_connections()

        if self.listening_address and time() - self.start_time > delay:
            self.update_presence()
            self.update_advertisements()

    def update_connections(self, interval=5):
        """Probe discovery for the next dataset needing more peers.

        Runs at most once per interval seconds, and only when no
        discovery operation is already in flight.
        """
        if not discovery_threads \
                and time() - self.last_connection_update > interval:
            self.last_connection_update = time()

            # XXX Some thoughts on how to make the connection update
            # process lighter and yet more responsive:
            #
            # 1. Factor it out into a separate module that accepts
            # IIDs to probe.
            #
            # 2. Bump the retry intervals from 10 minutes to one hour.
            #
            # 3. Detect accessed IIDs and probe those promptly.  Apply
            # a one-minute squelch, but override all other retry intervals.

            # next() rather than the Python-2-only .next() method.
            iid = next(self.datasets_to_sync)
            if iid is not None:
                def callback(results):
                    self.funcall(self.process_peer_ids, iid, results)
                discover(iid, cb=callback)

    def generate_datasets_to_sync(self, minimum=2, iid_retry_interval=600):
        """Endless generator cycling over subscribed IIDs that have fewer
        than minimum active sessions and are not squelched; yields None
        once per full pass as a cycle marker."""
        while True:
            for iid in self.db.list_subscriptions():
                if self.count_sessions(iid) < minimum:
                    last_failure = self.squelch.get(iid, 0)
                    if time() - last_failure > iid_retry_interval:
                        yield iid
            yield None

    def count_sessions(self, iid):
        """Number of sessions currently serving the dataset iid."""
        return len(self.list_sessions(iid))

    def list_sessions(self, iid):
        """List the sessions currently serving the dataset iid."""
        return [session for session in self.sessions
                if iid in session.datasets_served]

    def process_peer_ids(self, iid, results, peer_id_retry_interval=600):
        """Handle dataset-discovery results: pick the first usable peer ID
        and kick off address discovery for it; otherwise squelch iid."""
        try:
            for advertisement in results:
                peer_id = advertisement['id']
                last_failure = self.squelch.get(peer_id, 0)
                if time() - last_failure > peer_id_retry_interval \
                        and not self.is_redundant_id(peer_id):
                    def callback(results):
                        self.funcall(self.process_addresses, peer_id, results)
                    discover(peer_id, cb=callback)
                    return
        except Exception:
            # Malformed advertisement entries (missing 'id', wrong type)
            # are logged and treated as "no peer found".
            # (Exception replaces the Python-2-only StandardError.)
            logging.warning('Got bad advertisement data.')

        self.squelch[iid] = time()

    def process_addresses(self, peer_id, results,
                          connection_retry_interval=600):
        """Handle peer-discovery results: try to connect to the first
        acceptable address of the preferred type; otherwise squelch the
        peer ID."""
        try:
            for presence in results:
                addresses = presence['addresses']
                # addr_type (not "type") to avoid shadowing the builtin.
                addr_type = self.get_preferred_type(addresses)
                addresses = filter_addresses(addresses, [addr_type])
                shuffle(addresses)
                for host, port in addresses:
                    address = host, port # convert list to tuple
                    if not self.is_redundant_address(address):
                        last = self.squelch.get(address, 0)
                        if time() - last > connection_retry_interval:
                            self.squelch[address] = time()
                            self.connect(address, id=peer_id)
                            return
        except Exception:
            # See process_peer_ids: bad data means "no address found".
            logging.warning('Got bad presence data.')

        self.squelch[peer_id] = time()

    def is_redundant_id(self, id):
        """True if id is our own ID or already covered by some session."""
        return (id == self.db.my_id
                or any(id in session.peer_ids
                       for session in self.sessions))

    def is_redundant_address(self, address):
        """True if the (host, port) address is one of our own or already
        has a session.  (Explicit unpacking replaces the Python-2-only
        tuple-parameter syntax; callers still pass a pair.)"""
        host, port = address
        return ((host, port) in self.get_my_addresses()
                or any((session.remote_host == host
                        and session.remote_port in (port, None))
                       for session in self.sessions))

    def get_preferred_type(self, addresses):
        """Choose 'private' or 'public' addressing for reaching a peer.

        If the peer advertises private addresses and one of its hosts
        matches one of our public addresses -- presumably meaning we sit
        behind the same NAT (TODO confirm) -- prefer 'private'.
        """
        hosts = set(host for host, port in addresses)

        his_private = filter_addresses(hosts, ['private'])
        if his_private:
            my_public = self.get_my_ip_addresses(['public'])
            if hosts.intersection(my_public):
                return 'private'

        return 'public'

    def update_presence(self, interval=30, ttl=600, minttl=60):
        """Advertise our own addresses under our ID, at most once per
        interval seconds."""
        if time() - self.last_presence_update > interval:
            self.last_presence_update = time()

            addresses = self.get_my_addresses()
            if addresses:
                data = {'addresses': addresses}
                advertise([self.db.my_id], data, ttl, minttl)

    def update_advertisements(self, interval=5, ttl=24*3600, minttl=600):
        """Advertise our ID under every subscribed IID, at most once per
        interval seconds, and only when no advertisement is in flight."""
        if not advertisement_threads \
                and time() - self.last_advertisement_update > interval:
            self.last_advertisement_update = time()

            data = {'id': self.db.my_id}
            iids = self.db.list_subscriptions()
            advertise(iids, data, ttl, minttl)

    def get_my_addresses(self, types=('public', 'private')):
        """List our (host, port) pairs of the given types, or [] when we
        are not listening.  (Immutable tuple default replaces the shared
        mutable list.)"""
        port = self.get_my_port()
        if port is None:
            return []
        else:
            addresses = self.get_my_ip_addresses(types)
            return [(host, port) for host in addresses]

    def get_my_port(self):
        """Return our listening port, or None when not listening.

        TypeError is caught as well as ValueError: unpacking the default
        listening_address of None raises TypeError, which previously
        escaped when this was called without a listening address.
        """
        try:
            host, port = self.listening_address
            return port
        except (TypeError, ValueError):
            return None

    def get_my_ip_addresses(self, types):
        """Collect our own host addresses of the given types, from the
        local hostname lookup plus what sessions report back to us."""
        results = set()

        try:
            host = gethostbyname(gethostname())
            results.add(host)
        except error:
            pass

        for session in self.sessions:
            for address in (session.my_inward_address,
                            session.my_outward_address):
                if address:
                    host, port = address
                    results.add(host)

        return sorted(filter_addresses(results, types))

    def funcall(self, f, *args):
        """Schedule f(*args) via the callback scheduler, printing (not
        propagating) any exception it raises."""
        def proxy():
            try:
                f(*args)
            except:
                # print_exc is imported from traceback at module top;
                # previously this name was unresolved (NameError).
                print_exc()

        cb = self.callback
        if f is not None and cb is not None:
            cb(proxy)

def filter_addresses(addresses, types):
    """Keep only the addresses whose type is listed in *types*.

    Each entry may be either a bare host string or a (host, port) pair;
    get_address_type accepts both forms.
    """
    def wanted(address):
        return get_address_type(address) in types

    return [address for address in addresses if wanted(address)]

def get_address_type(address):
    """Classify an IPv4 address by its special-use range.

    address -- either a host string or a (host, port) pair.
    Returns one of: 'illegal', 'zero', 'private', 'localhost', 'zeroconf',
    'documentation-and-examples', 'ipv6-to-ipv4-relay-anycast',
    'network-device-benchmark', 'multicast', 'reserved', or 'public'.
    """
    if isinstance(address, str):
        host = address
    else:
        host, port = address

    try:
        s = inet_aton(host)
    except error:
        return 'illegal'

    # bytearray yields ints on both Python 2 and 3; map(ord, s) broke on
    # Python 3, where inet_aton returns bytes whose items are already ints.
    a, b, c, d = bytearray(s)
    if a == 0:
        return 'zero'
    elif a == 10:
        return 'private'
    elif a == 127:
        return 'localhost'
    elif a == 169 and b == 254:
        return 'zeroconf'
    elif a == 172 and 16 <= b <= 31:
        return 'private'
    elif a == 192 and b == 0 and c == 2:
        return 'documentation-and-examples'
    elif a == 192 and b == 88 and c == 99:
        return 'ipv6-to-ipv4-relay-anycast'
    elif a == 192 and b == 168:
        return 'private'
    elif a == 198 and 18 <= b <= 19:
        return 'network-device-benchmark'
    elif 224 <= a <= 239:
        return 'multicast'
    elif 240 <= a <= 255:
        return 'reserved'
    else:
        return 'public'
