from config import *
from common import *
import os
import random
import shutil
import socket
import sqlite3
import time
from cStringIO import StringIO
from hashlib import md5
from twisted.internet import reactor, threads
from twisted.enterprise import adbapi       
from twisted.python.util import println
import cPickle   
class Peer:        
    schema_file = 'peer/peer.sql'
    '''the database schema file'''
    crypto_schema_file = 'peer/peer_crypto.sql'
    '''the database for crypto keys'''      
    num_unsent_blocks = 0
    '''whether the ids of all of the data blocks\
     in the database are sent to the recipe server and tracker'''
    recipe_queue = []
    '''
    Recipe queue is a list of tuples (filename, file_hash, processed_bytes, xids, yids).
    When files are uploaded by the user, they are saved to a file, and  the recipe of a file
    is created incrementally.
    '''  
    recipe_download_queue = []
    '''
    Recipe download queue is the list of recipes that are being downloaded.    
    '''  
    file_queue = []
    '''\
    file_queue is the list of files whose data blocks exist on database but they
    are not on disk. Periodically part of a file gets written to the disk.
    '''
    local_recipes = []
    '''
    the first 100 rows of recipes table, updated periodically.
    '''   
    rs_search_results = []    
    '''
    The recipe search results of the last search
    '''        
    sql_results = None
    rpc_cli_map = {}
    def __init__(self, name,
                 rs_host=RECIPE_SERVER_EXTERNAL_IP,
                 rs_port=RECIPE_SERVER_EXTERNAL_PORT,
                 tracker_host=TRACKER_EXTERNAL_IP,
                 tracker_port=TRACKER_EXTERNAL_PORT,
                 monitor_host=MONITOR_EXTERNAL_IP,
                 monitor_port=MONITOR_EXTERNAL_PORT,
                 external_port=None,
                 ):
        """Set up this peer's on-disk state, identity and connections.

        name: required label; all state lives under './.<name>.peer/'.
        rs_host/rs_port: recipe server address (config defaults).
        tracker_host/tracker_port: tracker address.
        monitor_host/monitor_port: monitor address.
        external_port: port advertised to other peers; defaults to the
            locally chosen RPC port.

        Raises Exception when name is empty.  As side effects this
        creates the peer directory, both sqlite databases, a persistent
        random peer id, an RSA key pair, three RPC client connections
        and the local RPC server, then kicks off the periodic
        add_to_tracker and update_anonym_blocks loops.
        """
        if not name:
            raise Exception('name must be specified')
        self.name = name
        rootdir = '.%s.peer' % self.name
        self.rootdir = rootdir
        if not os.path.exists(rootdir):
            os.mkdir(rootdir)
        self.db = '%s/%s.sqlite' % (rootdir, self.name)
        #saving the crypto keys in a different database allows us
        #to delete the other database without destroying the public keys
        self.crypto_db = '%s/%s-crypto.sqlite' %(rootdir, self.name)
        self.filedir = '%s/shared' % rootdir
        if not os.path.exists(self.filedir):
            os.mkdir(self.filedir)
        # The peer id is generated once and persisted, so the peer keeps
        # the same identity across restarts.
        id_file = '%s/id' % rootdir
        if os.path.exists(id_file):
            self.id = open(id_file).read()
        else:
            # md5 over time + name + a random number: a cheap unique id,
            # not a security token.
            self.id = md5('%d%s%d' %
                      (time.time(),
                       self.name,
                       random.randint(0, int(time.time())))).hexdigest()
            with open(id_file, 'w') as f:
                f.write(self.id)
        # Create any missing tables in both databases.
        check_schema(self.db, self.schema_file)
        check_schema(self.crypto_db, self.crypto_schema_file)
        crypto.generate_keys(self.crypto_db)
        self.pubkey = crypto.get_public_key(self.crypto_db)
        # Pickled form is what gets shipped to the tracker/other peers.
        self.pickled_pubkey = cPickle.dumps(self.pubkey)
        rs_host = rs_host or 'localhost'
        tracker_host =  tracker_host or 'localhost'
        monitor_host = monitor_host or 'localhost'
        self.recipe_server = RPCClient(rs_host, rs_port, ping_interval=5)
        self.tracker = RPCClient(tracker_host, tracker_port, ping_interval=5)
        self.monitor = RPCClient(monitor_host, monitor_port, ping_interval=5)
        # TODO: probably better to have this pubkey builtin to the program
        set_public_key(self.monitor)
        set_public_key(self.recipe_server)
        set_public_key(self.tracker)
        self.rpc_host = '0.0.0.0'
        self.rpc_port = get_free_port()
        self.rpc_server = RPCServer(self)
        self.external_ip = '127.0.0.1'
        self.external_port = external_port or self.rpc_port
        # check_same_thread=False: the pool runs queries on worker
        # threads, not the thread that created the connection.
        self.dbpool = adbapi.ConnectionPool('sqlite3', self.db, check_same_thread=False, timeout=30)
        self.update_db_cache()
        self.add_to_tracker()
        self.update_anonym_blocks()
    
    def add_to_tracker(self):
        """(Re)announce this peer to the tracker.

        Re-registers every 30s while the tracker link is up and retries
        every 3s while it is down.  Reschedules itself, so it only
        needs to be called once at startup.
        """
        if not self.tracker.status:
            # Tracker unreachable -- poll again soon.
            reactor.callLater(3, self.add_to_tracker)
            return
        self.tracker.add_peer(self.id, self.name, self.external_ip,
                              self.external_port, self.pickled_pubkey)
        reactor.callLater(30, self.add_to_tracker)
    ####################RPC METHODS #############################
    def get_public_key(self):
        return self.pubkey
    def get_block_data(self, bids):        
        '''
        return an array that contains the data blobs for each block in bids.
        '''
        print "get_block_data"
        def nit(txn):
            print "get_block_data.nit"            
            ret = []                   
            for bid in bids:
                res = txn.execute('select data from blocks where id="%s"' % bid)
                row = res.fetchone()
                ret.append(str(row[0]))
            return ret
        d = self.dbpool.runWithConnection(nit)
        d.addErrback(errorHandler("get_block_data.nit failed"))    
        return d    
    #####################WEB UI METHODS ############################
    def run_sql(self, query):
        """Web-UI debug hook: run an arbitrary SQL query and stash the
        stringified result rows in self.sql_results for display.

        The query is executed verbatim (no parameterization) by design;
        this is a local debugging facility and must never be exposed to
        untrusted input.  Runs asynchronously on the db pool.
        """
        def nit(txn):
            print "running sql query",query
            res = txn.execute(query)
            sql_results = []
            for row in res:
                arr = []
                for data in row:
                    # BLOB columns come back as py2 buffers; show the
                    # hex of their first 16 bytes instead of raw bytes.
                    if isinstance(data, buffer):
                        data = ''.join(map(lambda x:'%02x'%ord(x), data[:16]))
                    else:
                        data = str(data)
                    arr.append(data)
                sql_results.append(arr)
            print "setting sql_results"
            self.sql_results = sql_results
        self.dbpool.runWithConnection(nit).addErrback(errorHandler("run_sql failed"))
    
    def save_existing_file(self, file, filename):
        '''
        This function is called by the web interface to add a complete file that's received from the web UI user.
        At this point file is uploaded to a temporary path. this path is moved under the files directory of the user.
        Then it is added to recipe_queue for processing.
        '''    
        def nit():
            print "save_existing_file"            
            path = "%s/%s" % (self.filedir, filename)
            wfile = open(path, 'wb')
            shutil.copyfileobj(file, wfile)
            wfile.close()
            fileid = get_file_id(path)                                
            return (fileid, path)
        d = threads.deferToThread(nit)
        d.addErrback(errorHandler("save_existing_file, copying failed"))
        def nit(t):
            print "calling create_recipe with db connection"            
            return self.dbpool.runWithConnection(self.create_recipe, t[0], t[1])
        d.addCallback(nit)
        d.addErrback(errorHandler("save_existing_file, call to create_recipe failed"))
        return d
            
    
    def search_rs(self, query):
        """ask the recipe server for the specified query"""
        print "search_rs"     
        if not self.recipe_server.status:
            print "recipe_server not available"
            return []
        def nit(recipes):  
            if recipes is not None:
                print "storing search results the recipes, #recipes = ", len(recipes)
                self.rs_search_results = [(r.file_id, r.file_info.name, r.file_info.size) for r in recipes]
        d = self.recipe_server.search(query)
        d.addErrback(errorHandler("recipe_server_search failed"))
        d.addCallback(nit)
        
    
    def download_file(self, fileid):
        """Start downloading the file with id *fileid*.

        Fetches the recipe from the recipe server, asks the tracker
        which peers hold each of its blocks, assigns blocks to source
        peers via choose_peers_for_download, and appends the annotated
        recipe to recipe_download_queue, which download_recipes then
        drains.  Returns silently when either server is unreachable or
        when not every block can be sourced.
        """
        print "download_file"
        if not self.recipe_server.status:
            print "not connected to recipe server"
            return
        if not self.tracker.status:
            print "not connected to tracker"
            return
        def nit(recipe):
            if recipe is None:
                print "could not issue a search query to recipe server,\
                    or the recipe is not found on the recipe server"
                return
            print "got the recipe"
            print "num blocks = ", len(recipe.xids)
            block_ids = recipe.xids + recipe.yids
            # This peer's own identity tuple, so the chooser can prefer
            # blocks we already hold locally.
            me = (self.id, self.name, self.external_ip, self.external_port, self.pickled_pubkey)
            def nit(bid2peers):
                recipe.peer2bids = choose_peers_for_download(me, bid2peers)
                if recipe.peer2bids is None:
                    print "error in download_file"
                    print "could not find all of the blocks"
                    return
                recipe.blocks_downloaded = 0
                # NOTE(review): this replaces the num_blocks() method on
                # the instance with its cached value; later code reads
                # it as a plain attribute, so this looks intentional.
                recipe.num_blocks = recipe.num_blocks()
                self.recipe_download_queue.append(recipe)
                reactor.callLater(0.1, self.download_recipes)
                print "recipe with fileid, ", fileid, " is added to recipe download queue"
            def handle_deferred(d):
                # get_sharing_peers appears to fire with an inner
                # Deferred; chain onto it to get the actual
                # bid -> peers mapping (same pattern is used elsewhere
                # in this class) -- confirm against the RPC client.
                d.addCallback(nit)
                d.addErrback(errorHandler('error in nit'))
            d = self.tracker.get_sharing_peers(block_ids)
            d.addErrback(errorHandler("get_sharing_peers call failed"))
            d.addCallback(handle_deferred)
            d.addErrback(errorHandler('handle_deferred failed'))
        d = self.recipe_server.get_recipe(fileid)
        d.addErrback(errorHandler("get_recipe call failed"))
        d.addCallback(nit)
    #################### DB METHODS GET STUFF FROM DB TURN THEM INTO OBJECTS ####################################
    def _get_recipe(self, txn, file_id):
        res = txn.execute('select * from recipes where file_id = "%s"' % file_id)
        row = res.fetchone()
        if not row:
            print "no recipe with file_id = ", file_id
        file_info = FileInfo(row[1], row[2], row[3])
        block_size = row[5]
        res = txn.execute('select block_id1, block_id2 from recipe_blocks where file_id="%s" order by rowid' % file_id)
        xids,yids = zip(*res)        
        return Recipe(file_id, file_info, xids, yids, block_size)

    def get_recipe(self, file_id):
        """Get the recipe for a single file; returns a Deferred that
        fires with the Recipe object (or None when unknown)."""
        # Bug fix: file_id was not forwarded to _get_recipe, so the
        # call always failed with a missing-argument TypeError.
        d = self.dbpool.runWithConnection(self._get_recipe, file_id)
        d.addErrback(errorHandler("get_recipe, database call failed"))
        return d
    def add_zero_block_to_db(self, txn, length=BLOCK_SIZE):
        """Insert the all-zeros block of the given length and return its id.

        The insert is 'or ignore', so calling this repeatedly for the
        same length is harmless.
        """
        zero_hash = get_zero_hash(length)
        zero_blob = buffer('\x00' * length)
        # Third column is sent_to_tracker = 0: not announced yet.
        txn.execute('insert or ignore into blocks values (?, ?, ?)',
                    (zero_hash, zero_blob, 0))
        return zero_hash
          


    #######################QUEUE METHODS THAT MUST BE CALLED PERIODICALLY##################
    def _update_db_cache(self,txn):
        print "update_db_cache"        
        c = txn.execute('select * from recipes limit 100')
        self.local_recipes = map(lambda row:(row[0], row[1], row[2], (row[2]-1)/row[5] + 1, row[4]), c)       
    def update_db_cache(self):
        """Asynchronously refresh the local_recipes cache (first 100
        rows of the recipes table) on the db pool."""
        self.dbpool.runWithConnection(self._update_db_cache)
    
    def get_rpc_cli(self, peer_id, host, port, pickled_pubkey=None):
        """Return a cached RPCClient for *peer_id*, reconnecting if the
        cached connection is down.

        pickled_pubkey: the peer's cPickle'd public key.  When given, a
        freshly created connection is encrypted with it; callers should
        always pass it (tracker announcements include it).

        Bug fix: the old code referenced an undefined 'pickled_pubkey'
        name and raised NameError whenever a new connection was needed;
        it is now an optional, backward-compatible parameter.
        """
        cached = self.rpc_cli_map.get(peer_id)
        if cached is None or not cached.status:
            rpc_cli = RPCClient(host, port)
            if pickled_pubkey is not None:
                rpc_cli.enable_encryption(cPickle.loads(pickled_pubkey),
                                          pickled_pubkey)
            self.rpc_cli_map[peer_id] = rpc_cli
        return self.rpc_cli_map[peer_id]
    def update_anonym_blocks(self): 
        print "update_anonym_blocks"   
        def _handle_block_data(txn, block_data, bids):
            print "inserting data to anonymizer_blocks"
            txn.executemany('insert or ignore into anonymizer_blocks values (?,?)', 
                               zip(bids, map(buffer, block_data)))
        def handle_block_data(block_data, bids):   
            print "handle_block_data"                                    
            d = self.dbpool.runInteraction(_handle_block_data, block_data, bids)
            d.addErrback(errorHandler("error in _handle_block_data"))
        def handle_deferred(d, bids):
            print "handle_deferred"
            d.addCallback(handle_block_data, bids)
            d.addErrback(errorHandler('problem in handle_block_data'))
        if not self.tracker.status:
            reactor.callLater(5, self.update_anonym_blocks)
            return
        def nit(bid2peers):
            peer2bids = choose_peers_for_download(me, bid2peers)
            for peer, bids in peer2bids.iteritems():
                peer_id, peer_name, host, port, pickled_pubkey = peer
                if peer_id == self.id:
                    #do nothing since the peer already has these blocks
                    #remove the blocks from list of the blocks to be downloaded
                    print "found self in the peers to download list, skipping those blocks"                                                                        
                else:                                                                                                
                    rpc_cli = self.get_rpc_cli(peer_id, host, port)
                    d = rpc_cli.get_block_data(bids)
                    d.addCallback(handle_deferred, bids)
                    d.addErrback(errorHandler('problem in handle_deferred'))
        d = self.monitor.get_unused_blocks()
        d.addErrback(errorHandler('error in get_unused_blocks'))
        reactor.callLater(30, self.update_anonym_blocks)
    def get_anonym_block(self,txn,block_size):
        print "get_anonym_block"        
        res = txn.execute("select count(*) from anonymizer_blocks")
        num_rows = res.fetchone()[0]
        if num_rows == 0:
            print "no rows, generating data"
            data = os.urandom(block_size)
            bid = md5(data).hexdigest()
            return (bid, data)
        rowind = random.randint(0, num_rows-1)        
        res = txn.execute("select id,data from anonymizer_blocks limit %d,1 " % rowind)
        return map(str, res.fetchone())
        
        
    def create_recipe(self, txn, fileid, path, block_size=BLOCK_SIZE):
        """Split the file at *path* into XOR'd block pairs and persist
        them plus the recipe rows; runs on a db-pool connection.

        For each plaintext chunk d: take an anonymizer block x, store
        both x and y = x XOR d, and record the (x, y) id pair for the
        recipe.  The pair order is randomly flipped per block so the
        stored order does not reveal which half is the pad.  Upload
        progress is published through self.recipe_queue for the web UI.
        """
        print "create_recipe"
        m = {'id' : fileid, 'name':path, 'size':os.path.getsize(path), 'processed' : 0}
        # NOTE(review): opened in text mode; on platforms where text
        # mode differs from 'rb' this would corrupt binary data -
        # confirm the deployment target is POSIX-only.
        fp = open(path)
        self.recipe_queue.append(m)
        while True:
            #handle at most 512 blocks at a time = 16 MB
            blob = fp.read((1 << 9)*block_size)
            #print "handling a blob of size = ", len(blob)
            if blob:
                blocks = []
                recipe_blocks = []
                last_zero_block_size = 0
                hy = ''
                #anonym_blocks = self.get_anonym_blocks(txn, len(blob), block_size)
                for i in xrange(0,len(blob), block_size):
                    d = blob[i:i+block_size]
                    # x = pad block, y = pad XOR plaintext.
                    hx, xdata = self.get_anonym_block(txn, block_size)
                    ydata = xor_blocks(xdata, d)
                    hy = md5(ydata).hexdigest()
                    # Third column is sent_to_tracker = 0.
                    blocks.append((hx, buffer(xdata), 0,))
                    blocks.append((hy, buffer(ydata), 0,))
                    # Coin flip: randomize which id lands in block_id1
                    # vs block_id2 (XOR is symmetric, so reconstruction
                    # is unaffected).
                    if random.randint(0,1):
                        tmp = hx
                        hx = hy
                        hy = tmp
                    recipe_blocks.append((fileid, hx, hy,))

                txn.executemany("insert or ignore into blocks values (?, ?, ?)",
                            iter(blocks))
                txn.executemany("insert into recipe_blocks values (?,?,?)",
                                 iter(recipe_blocks))
                m['processed'] += len(blob)
                self.num_unsent_blocks += len(blocks)
            else:
                print "file processing is finished, adding recipe to the database"
                # Columns: file_id, name, size, category, sent_to_rs,
                # block_size.
                txn.execute("insert or ignore into recipes values (?,?,?,?,?,?)",
                        (fileid, os.path.basename(fp.name), m['processed'], 'general', 0, block_size,))
                # Announce the new blocks to the tracker shortly.
                reactor.callLater(1, self.share_blocks)
                fp.close()
                #remove the file from the queue
                # NOTE(review): despite the comment above, m is never
                # removed from self.recipe_queue - confirm whether the
                # UI relies on finished entries staying visible.
                break
                
    def process_file_queue(self):        
        print "process_file_queue"
        recipe, file = self.file_queue[-1]
        def nit(txn):
            #num_blocks = min(100, len(recipe.xids))
            num_blocks = len(recipe.xids)        
            for i in xrange(num_blocks):
                c = txn.execute('select data from blocks where id="%s"' % recipe.xids[i])
                xdata = c.fetchone()[0]                        
                c = txn.execute('select data from blocks where id="%s"' % recipe.yids[i])
                ydata = c.fetchone()[0]            
                file.write(xor_blocks(xdata,ydata))
                recipe.processed += 1                   
            print "finished writing the file"
            oldpath = file.name
            file.close()
            newpath = '%s/%s' % (self.filedir, recipe.file_info.name)
            print "renaming %s to %s" % (oldpath, newpath)
            os.rename(oldpath, newpath)            
        d = self.dbpool.runWithConnection(nit)
        d.addErrback(errorHandler('problem in process_file_queue.nit'))
    
    def _download_finished(self, txn, recipe):
        print "adding recipe blocks to recipe_blocks table"
        txn.executemany('insert or ignore into recipe_blocks values (?,?,?)', 
                    zip([recipe.file_id]*len(recipe.xids), recipe.xids, recipe.yids))
        print "adding to recipe to recipes table"
        txn.execute('insert or ignore into recipes values (?,?,?,?,?,?)', 
                    (recipe.file_id, recipe.file_info.name, 
                     recipe.file_info.size, recipe.file_info.category, 
                     1, recipe.block_size,))                          
        print "adding download to the file construction queue"            
        tmp_file = open('%s/%s.tmp' % (self.rootdir, recipe.file_info.name), 'wb')        
        recipe.processed = 0
        self.file_queue.append((recipe, tmp_file))
        reactor.callLater(0.1, self.process_file_queue)
        reactor.callLater(1, self.share_blocks)
    def _handle_block_data(self, txn, block_data, recipe):
        """Store one batch of downloaded blocks and advance *recipe*'s
        download state; runs as a db interaction.

        Assumes the first entry of recipe.peer2bids is still the peer
        the batch was requested from - download_recipes issues one
        request at a time against items()[0], so the two stay in step.
        Reschedules download_recipes for the next batch.
        """
        print "_download_recipe_blocks.nit"
        num_blocks = len(block_data)
        peer, bids = recipe.peer2bids.items()[0]
        peer_id, peer_name, host, port, pickled_pubkey = peer
        # Third column is sent_to_tracker = 0: not announced yet.
        txn.executemany('insert or ignore into blocks values (?,?,?)',
                               zip(bids[:num_blocks], map(buffer, block_data), [0]*num_blocks))
        # Keep only the ids that still need to be fetched from this peer.
        bids = bids[num_blocks:]
        recipe.blocks_downloaded += num_blocks
        if not bids:
            print "finished downloading all of the blocks from the peer at %s:%d" %(host, port)
            del recipe.peer2bids[peer]
        else:
            recipe.peer2bids[peer] = bids
        reactor.callLater(0.2, self.download_recipes)
        

    def download_recipes(self):
        """Drive the block download for the newest queued recipe.

        Each call handles one step: either finish up (all of
        peer2bids drained -> persist via _download_finished), skip a
        self-entry, or request up to 100 blocks from the first peer in
        recipe.peer2bids.  _handle_block_data reschedules this method
        until the recipe completes.  NOTE(review): the finished recipe
        is never popped from recipe_download_queue here - confirm
        whether multiple concurrent downloads are expected to work.
        """
        recipe = self.recipe_download_queue[-1]
        peer2bids = recipe.peer2bids

        def handle_block_data(block_data):
            print "handle_block_data"
            d = self.dbpool.runInteraction(self._handle_block_data, block_data, recipe)
            d.addErrback(errorHandler("error in _handle_block_data"))
        def handle_deferred(d):
            # get_block_data appears to fire with an inner Deferred;
            # chain onto it for the actual blob list (same pattern as
            # download_file) -- confirm against the RPC client.
            print "handle_deferred"
            d.addCallback(handle_block_data)
            d.addErrback(errorHandler('problem in handle_block_data'))
        if not recipe.peer2bids:
            print "finished downloading the blocks for the recipe with id ", recipe.file_id
            d = self.dbpool.runInteraction(self._download_finished, recipe)
            d.addErrback(errorHandler("error in nit"))
        else:
            peer, bids = peer2bids.items()[0]
            peer_id, peer_name, host, port, pickled_pubkey = peer
            if peer_id == self.id:
                #do nothing since the peer already has these blocks
                #remove the blocks from list of the blocks to be downloaded
                print "found self in the peers to download list, skipping those blocks"
                del recipe.peer2bids[peer]
                recipe.blocks_downloaded += len(bids)
                reactor.callLater(0.1, self.download_recipes)
            else:
                # Request at most 100 blocks per round trip.
                num_blocks = min(100, len(bids))
                # Reuse the cached connection unless it has gone down.
                if peer_id not in self.rpc_cli_map or not self.rpc_cli_map[peer_id].status:
                    rpc_cli = RPCClient(host, port)
                    rpc_cli.enable_encryption(cPickle.loads(pickled_pubkey), pickled_pubkey)
                    self.rpc_cli_map[peer_id] = rpc_cli
                rpc_cli = self.rpc_cli_map[peer_id]
                d = rpc_cli.get_block_data(bids[:num_blocks])
                d.addCallback(handle_deferred)
                d.addErrback(errorHandler('problem in handle_deferred'))
        
    ##############################METHODS THAT CHECK THE DATABASE FOR UPDATES PERIODICALLY################
    def _share_blocks(self, txn):
        """One tracker-announce pass; runs as a db interaction.

        Sends up to 512 not-yet-announced block ids to the tracker,
        marks them sent, then schedules the next pass via share_blocks.
        When nothing is left to announce, falls through to recipe
        registration.  NOTE(review): blocks are marked sent without
        waiting for add_share_info to succeed - confirm this
        fire-and-forget behavior is intended.
        """
        print "share_blocks"
        if not self.tracker.status:
            reactor.callLater(2, self.share_blocks)
            return
        res = txn.execute('select count(*) from blocks where sent_to_tracker=0')
        self.num_unsent_blocks = res.fetchone()[0]
        print "num_unsent_blocks = ",self.num_unsent_blocks
        if not self.num_unsent_blocks:
            print "all blocks are sent to the tracker"
            reactor.callLater(0.2, self.register_recipes)
            return
        res = txn.execute('select id from blocks where sent_to_tracker = 0 limit 512')
        ids = map(lambda x:x[0], res)
        d = self.tracker.add_share_info(self.id, ids)
        d.addErrback(errorHandler('failed to call add_share_info'))
        _ids = [{'id':x} for x in ids]
        def nit():
            # Mark the batch as announced, then loop for the next one.
            d = self.dbpool.runInteraction(lambda txn:
                                           txn.executemany('update blocks set sent_to_tracker=1 where id=:id', iter(_ids)))
            d.addErrback(errorHandler("problem updating the blocks sent to tracker"))
            d.addCallback(lambda ignored: self.share_blocks())
        reactor.callLater(0.1, nit)
    def share_blocks(self):
        """Announce locally held block ids to the tracker.

        Each pass pushes up to 512 unannounced ids (see _share_blocks)
        and reschedules itself until everything is marked sent; while
        the tracker is unreachable it retries every 2 seconds.
        """
        if self.tracker.status:
            d = self.dbpool.runInteraction(self._share_blocks)
            d.addErrback(errorHandler('failed to call _share_blocks'))
        else:
            reactor.callLater(2, self.share_blocks)

                           
    
    def _register_recipes(self, txn):
        print "_register_recipes"
        if  not self.recipe_server.status:
            reactor.callLater(2, self.register_recipes)
            return                
        res = txn.execute('select file_id from recipes where sent_to_rs=0 limit 1')
        ids = map(lambda x:x[0], res)                
        if not ids:
            print "no recipe to register"
            self.update_db_cache()
            return
        file_id = ids[0]        
        print 'calling _get_recipe with file_id =',file_id
        recipe = self._get_recipe(txn, file_id)
        xids,yids = recipe.xids,recipe.yids
        print 'len(xids) = %d, len(yids) = %d' % tuple(map(len, (xids, yids)))
        print 'calling add_recipe_info'
        def send_file_info():                        
            d = self.recipe_server.add_recipe_info(recipe.file_id, recipe.file_info.name, 
                                               recipe.file_info.size, 
                                               recipe.file_info.category, 
                                               recipe.block_size)
            d.addErrback(errorHandler("problem occured during the registration of the recipe"))
            return d
        fileid = recipe.file_id
        def send(ignored, xids, yids):
            print "seind 512 more xids, yids"
            if not xids:
                def nit(_):
                    d = self.dbpool.runOperation(
                                  "update recipes set sent_to_rs = 1 where file_id='%s'" % fileid)
                    d.addErrback(errorHandler("can not update the sent recipes"))
                    d.addCallback(lambda _:self.register_recipes())
                d = send_file_info()                                  
                d.addCallback(nit)
                d.addErrback(errorHandler("error in nit"))
                return
            d = self.recipe_server.add_recipe_blocks(fileid, xids[:512], yids[:512])
            d.addErrback(errorHandler("problem in add_recipe_blocks"))
            d.addCallback(send, xids[512:], yids[512:])
        send(None, xids, yids)            
        
                        
    def register_recipes(self):
        """
        registers at most 100 recipes that are not currently registered.
        The aim is to prevent load on recipe server and enable the user
        to use the program even when the recipe server is not online.
        """  
        print "register_recipes"
        if  not self.recipe_server.status:
            reactor.callLater(2, self.register_recipes)
            return       
        d = self.dbpool.runInteraction(self._register_recipes)
        d.addErrback(errorHandler("_register_recipes failed"))
    #########################################METHODS ABOUT RUNNING###################        
    
    def reset(self):    
        print "RESET"
        print "resetting the peer internals"        
        self.recipe_queue = []
        self.recipe_download_queue = []
        self.file_queue = []
        self.local_recipes = []
        self.local_blocks = []
        self.local_recipe_blocks = []
        self.rs_search_results = []
        self.tracker.remove_peer_blocks(self.id)
        print "clearing the databases"
        drop_tables(self.db)
        create_tables(self.db, self.schema_file)                            
        print "DONE RESET"        
        



