
from collections import defaultdict, namedtuple
from hashlib import sha1
import math
import os
import time
import asyncio
from asyncio import Queue
import logging

from pubsub import pub
from BitClient.torrent import Torrent

from tracker import Tracker
from protocol import PeerConnection, REQUEST_SIZE

MAX_PEER_CONNECTIONS = 50

class TorrentClient:
    """Drives the download of a single torrent.

    Owns the tracker session, a pool of PeerConnection workers and the
    PieceManager that tracks piece/block state.
    """

    def __init__(self, torrent):
        self.tracker = Tracker(torrent)
        # Peers handed out by the tracker, consumed by the PeerConnections.
        # (Name keeps the historical 'avaliable' spelling in case code
        # outside this file references the attribute.)
        self.avaliable_peers = Queue()
        # The PeerConnection workers; created lazily in start().
        self.peers = []
        self.piece_manager = PieceManager(torrent)
        # Set to True (via stop()) to make start() exit its announce loop.
        self.abort = False

    async def start(self):
        """Run the announce loop until the download is aborted.

        Spawns MAX_PEER_CONNECTIONS PeerConnection workers, then
        periodically (re-)announces to the tracker, feeding any returned
        peers into the shared queue.
        """
        self.peers = [PeerConnection(self.avaliable_peers,
                                     self.tracker.torrent.info_hash,
                                     self.tracker.peer_id,
                                     self.piece_manager,
                                     self._on_block_retrieved)
                      for _ in range(MAX_PEER_CONNECTIONS)]

        # The time we last made a successful announce call (timestamp)
        previous = None
        # Default interval between announce calls (in seconds)
        interval = 30 * 60

        while True:
            if self.abort:
                logging.info('Aborting...')
                break

            current = time.time()
            if previous is None or previous + interval < current:
                response = await self.tracker.connect(
                    first=(previous is None),
                    uploaded=0,
                    downloaded=0
                )

                if response:
                    previous = current
                    interval = response.interval
                    # self._empty_queue()

                    logging.info('Got tracker response: {}'.format(response))
                    for peer in response.peers:
                        self.avaliable_peers.put_nowait(peer)
                else:
                    # Announce failed: back off briefly instead of hammering
                    # the tracker in a tight loop (previous is still unset,
                    # so the next iteration would retry immediately).
                    await asyncio.sleep(5)
            else:
                await asyncio.sleep(5)
        self.stop()

    def _empty_queue(self):
        """Drop any peers still waiting in the queue."""
        while not self.avaliable_peers.empty():
            self.avaliable_peers.get_nowait()

    def stop(self):
        """Abort the announce loop and shut down workers, piece manager
        and tracker session."""
        self.abort = True
        for peer in self.peers:
            peer.stop()
        self.piece_manager.close()
        self.tracker.close()

    def _on_block_retrieved(self, peer_id, piece_index, block_offset, data):
        """Callback handed to each PeerConnection: forward a received
        block to the piece manager."""
        self.piece_manager.block_received(peer_id, piece_index, block_offset, data)


class Block:
    """A fixed-size slice of a piece — the unit actually requested from a peer.

    The class-level constants describe the block's download life-cycle:
    Missing (not yet requested), Pending (requested, awaiting data) and
    Retrieved (data received).
    """

    Missing = 0
    Pending = 1
    Retrieved = 2

    def __init__(self, piece: int, offset: int, length: int) -> None:
        # Index of the piece this block belongs to.
        self.piece = piece
        # Byte offset of this block within its piece.
        self.offset = offset
        # Length in bytes (the final block of a piece may be shorter).
        self.length = length
        # Current life-cycle state plus the raw payload once retrieved.
        self.status, self.data = Block.Missing, None


class Piece:
    """One torrent piece: an ordered collection of Blocks plus the SHA1
    hash (from the .torrent meta-info) used to verify the assembled data."""

    def __init__(self, index: int, blocks: 'list[Block]', hash_value) -> None:
        self.index = index
        self.blocks = blocks
        self.hash = hash_value

    def reset(self):
        """Mark every block Missing again (used after a hash mismatch)."""
        for block in self.blocks:
            block.status = Block.Missing

    def next_request(self) -> 'Block | None':
        """Return the next Missing block, marking it Pending first.

        Returns None when no block is left to request.
        """
        block = next((b for b in self.blocks if b.status == Block.Missing), None)
        if block:
            block.status = Block.Pending
        return block

    def block_received(self, offset: int, data: bytes):
        """Store *data* for the block at *offset* and mark it Retrieved.

        Logs a warning (and drops the data) if no block matches the offset.
        """
        block = next((b for b in self.blocks if b.offset == offset), None)
        if block:
            block.status = Block.Retrieved
            block.data = data
        else:
            logging.warning('Trying to put data({}) into non-existing block {}'.format(len(data), offset))

    def is_complete(self):
        """True when every block has been retrieved (hash not yet checked)."""
        return all(b.status == Block.Retrieved for b in self.blocks)

    def is_hash_matching(self):
        """True when the SHA1 of the assembled piece matches the meta-info hash."""
        return self.hash == sha1(self.data).digest()

    @property
    def data(self):
        """The piece's bytes, concatenated from its blocks in list order.

        Only meaningful once the piece is complete; before that some block
        data is still None and the join would raise TypeError.
        """
        return b''.join(b.data for b in self.blocks)


# A block that has been requested from a peer, paired with the time (ms
# since epoch) the request was issued — used to re-request timed-out blocks.
PendingRequest = namedtuple('PendingRequest', ['block', 'added'])

class PieceManager:
    """Tracks the state of every piece and block of the torrent.

    Knows which pieces are missing / ongoing / complete, which peer claims
    which pieces, which block to request next, and writes hash-verified
    pieces to the output file.
    """

    def __init__(self, torrent: Torrent) -> None:
        self.torrent = torrent
        # peer_id -> bitfield of pieces that peer claims to have.
        self.peers = {}
        # Outstanding PendingRequest entries, used for timeout re-issuing.
        self.pending_blocks = []
        self.ongoing_pieces = []
        self.have_pieces = []
        # How long (ms) a request may stay outstanding before re-issuing.
        self.max_pending_time = 5 * 60 * 1000
        self.missing_pieces = self._initiate_pieces()
        self.total_pieces = len(torrent.pieces)

        # Check if the output file already exists.
        if os.path.exists(self.torrent.output_file):
            # Resume: verify pieces already on disk.
            logging.info('File existed! Trying to resume...')
            self.fd = os.open(self.torrent.output_file, os.O_RDWR)
            self._resume()
            logging.info('Resume: {} / {} downloaded'.format(len(self.have_pieces), self.total_pieces))
        else:
            # Fresh download: create the output file. (This used to run
            # unconditionally, leaking the descriptor opened for resume.)
            self.fd = os.open(self.torrent.output_file, os.O_RDWR | os.O_CREAT)

    def _resume(self):
        """Hash-check each on-disk piece and split the pieces into
        have/missing accordingly.

        Reads sequentially from self.fd, which must be positioned at the
        start of the file.
        """
        torrent = self.torrent
        new_have_pieces = []
        new_missing_pieces = []
        for index, expected_hash in enumerate(torrent.pieces):
            piece_in = os.read(self.fd, torrent.piece_length)
            p = self.missing_pieces[index]
            if sha1(piece_in).digest() == expected_hash:
                new_have_pieces.append(p)
                pub.sendMessage('piece_change', index=index, status=2)
            else:
                new_missing_pieces.append(p)
                pub.sendMessage('piece_change', index=index, status=3)

        self.have_pieces = new_have_pieces
        self.missing_pieces = new_missing_pieces

    def _initiate_pieces(self) -> 'list[Piece]':
        """Construct the Piece/Block bookkeeping for the whole torrent."""
        torrent = self.torrent
        pieces = []
        total_pieces = len(torrent.pieces)
        std_piece_blocks = math.ceil(torrent.piece_length / REQUEST_SIZE)

        for index, hash_value in enumerate(torrent.pieces):
            if index < (total_pieces - 1):
                blocks = [Block(index, offset * REQUEST_SIZE, REQUEST_SIZE)
                          for offset in range(std_piece_blocks)]
            else:
                # The last piece is usually shorter than the rest.
                last_length = torrent.total_size % torrent.piece_length
                if last_length == 0:
                    # Exact multiple of the piece length: the last piece is
                    # full-sized (the bare modulo would wrongly produce a
                    # piece with zero blocks).
                    last_length = torrent.piece_length
                num_blocks = math.ceil(last_length / REQUEST_SIZE)
                blocks = [Block(index, offset * REQUEST_SIZE, REQUEST_SIZE)
                          for offset in range(num_blocks)]

                if last_length % REQUEST_SIZE > 0:
                    # Last block of the last piece is smaller than usual.
                    blocks[-1].length = last_length % REQUEST_SIZE

            pieces.append(Piece(index, blocks, hash_value))
        return pieces

    def close(self):
        """Close the output file descriptor."""
        if self.fd:
            os.close(self.fd)

    @property
    def complete(self):
        """True when every piece has been downloaded and verified."""
        return len(self.have_pieces) == self.total_pieces

    @property
    def bytes_downloaded(self) -> int:
        """Approximate byte count of verified data (counts the last piece
        as full-sized)."""
        return len(self.have_pieces) * self.torrent.piece_length

    @property
    def bytes_uploaded(self) -> int:
        # Seeding is not implemented.
        raise NotImplementedError()

    def add_peer(self, peer_id, bitfield):
        """Register a peer together with the bitfield of pieces it has."""
        self.peers[peer_id] = bitfield

    def update_peer(self, peer_id, index: int):
        """Record that a known peer announced (Have) piece *index*."""
        if peer_id in self.peers:
            self.peers[peer_id][index] = 1

    def remove_peer(self, peer_id):
        """Forget a disconnected peer."""
        if peer_id in self.peers:
            del self.peers[peer_id]

    def next_request(self, peer_id) -> 'Block | None':
        """
        Get the next Block that should be requested from the given peer.

        If there are no more blocks left to retrieve or if this peer does not
        have any of the missing pieces None is returned
        """
        # The algorithm implemented for which piece to retrieve is a simple
        # one. This should preferably be replaced with an implementation of
        # "rarest-piece-first" algorithm instead.
        #
        # The algorithm tries to download the pieces in sequence and will try
        # to finish started pieces before starting with new pieces.
        #
        # 1. Check any pending blocks to see if any request should be reissued
        #    due to timeout
        # 2. Check the ongoing pieces to get the next block to request
        # 3. Check if this peer have any of the missing pieces not yet started
        if peer_id not in self.peers:
            return None

        block = self._expired_requests(peer_id)
        if not block:
            block = self._next_ongoing(peer_id)
            if not block:
                block = self._next_missing(peer_id)
                # block = self._get_rarest_piece(peer_id).next_request()

        return block

    def block_received(self, peer_id, piece_index, block_offset, data):
        """Handle a block arriving from a peer.

        Clears the matching pending request, stores the data in its piece,
        and — when the piece completes — hash-checks it, writing it to disk
        on success or resetting it on corruption.
        """
        logging.debug('Received block({}) {} of piece {} from peer {}'.format(len(data), block_offset, piece_index, peer_id))

        # Remove the (at most one) matching pending request.
        for index, request in enumerate(self.pending_blocks):
            if request.block.piece == piece_index and request.block.offset == block_offset:
                del self.pending_blocks[index]
                break

        piece = next((p for p in self.ongoing_pieces if p.index == piece_index), None)
        if piece:
            piece.block_received(block_offset, data)
            if piece.is_complete():
                if piece.is_hash_matching():
                    pub.sendMessage('piece_change', index=piece.index, status=2)

                    self._write(piece)
                    self.ongoing_pieces.remove(piece)
                    self.have_pieces.append(piece)
                    complete = len(self.have_pieces)
                    logging.info('{} / {} pieces downloaded {:.3f}'.format(complete, self.total_pieces, (complete / self.total_pieces) * 100))

                else:
                    # Hash mismatch: throw the data away and re-download.
                    logging.warning('Discard corrupt piece {} from peer {}'.format(piece_index, peer_id))
                    piece.reset()
        else:
            logging.warning('The piece {} is not on going'.format(piece_index))

    def _expired_requests(self, peer_id) -> 'Block | None':
        """Return the first timed-out pending block this peer can serve,
        refreshing its request timestamp, or None."""
        current = int(round(time.time() * 1000))
        for index, request in enumerate(self.pending_blocks):
            if self.peers[peer_id][request.block.piece]:
                if request.added + self.max_pending_time < current:
                    logging.info('Re-requesting block {} of pieces {}'.format(request.block.offset, request.block.piece))
                    # Reset the timer so we do not re-issue it again at once.
                    self.pending_blocks[index] = PendingRequest(request.block, current)
                    return request.block

        return None

    def _get_rarest_piece(self, peer_id) -> Piece:
        """Pick the missing piece (that this peer has) held by the fewest
        peers, moving it to the ongoing list.

        NOTE(review): raises ValueError when the peer has none of the
        missing pieces (min() on an empty mapping); currently unused — the
        call site in next_request is commented out.
        """
        piece_count = defaultdict(int)
        for piece in self.missing_pieces:
            if not self.peers[peer_id][piece.index]:
                continue
            for p in self.peers:
                if self.peers[p][piece.index]:
                    piece_count[piece] += 1

        rarest_piece = min(piece_count, key=lambda p: piece_count[p])
        self.missing_pieces.remove(rarest_piece)
        self.ongoing_pieces.append(rarest_piece)

        pub.sendMessage('piece_change', index=rarest_piece.index, status=1)

        return rarest_piece

    def _next_ongoing(self, peer_id) -> 'Block | None':
        """Return the next block of an already-started piece this peer has,
        recording it as pending, or None."""
        current = int(round(time.time() * 1000))
        for piece in self.ongoing_pieces:
            if self.peers[peer_id][piece.index]:
                block = piece.next_request()
                if block:
                    self.pending_blocks.append(PendingRequest(block, current))
                    return block
        return None

    def _next_missing(self, peer_id) -> 'Block | None':
        """Start the first not-yet-started piece that this peer has and
        return its first block, or None if the peer has none of them."""
        for index, piece in enumerate(self.missing_pieces):
            # Only hand out pieces this peer actually advertises; the old
            # code skipped this check and assigned arbitrary pieces.
            if self.peers[peer_id][piece.index]:
                piece = self.missing_pieces.pop(index)
                self.ongoing_pieces.append(piece)

                pub.sendMessage('piece_change', index=piece.index, status=1)

                return piece.next_request()
        return None

    def _write(self, piece):
        """Write a verified piece to its offset in the output file."""
        pos = piece.index * self.torrent.piece_length
        os.lseek(self.fd, pos, os.SEEK_SET)
        os.write(self.fd, piece.data)