# location based representation of object
import math
import random
import uuid
from collections import defaultdict

import numpy as np
import torch
from skimage import measure

import visionBodyLearn.utils as utils


class CauseEffectPosition:
    """A learned causal rule: performing actionId displaces an object by positionDelta."""

    def __init__(self, causeEffectId, actionId, objInstId, positionDelta):
        self.id = causeEffectId
        self.actionId = actionId            # atomic action that causes the effect
        self.objInstId = objInstId          # object instance the effect was observed on
        self.positionDelta = positionDelta  # (row, col) displacement, typically a numpy array

    def __eq__(self, other):
        # BUG FIX: the original implementation fell off the end and implicitly
        # returned None (falsy) whenever actionId matched, so no two rules ever
        # compared equal.  Compare the fields that define the rule instead.
        if not isinstance(other, CauseEffectPosition):
            return NotImplemented
        return (self.actionId == other.actionId
                and self.objInstId == other.objInstId
                and np.array_equal(self.positionDelta, other.positionDelta))


class ObjectDescription:
    """A learned object: its pixel coordinates, an appearance feature, and a
    unique location id per pixel so individual pixels can be tracked."""

    def __init__(self, objDescriptionId, coordinates, feature):
        self.id = objDescriptionId
        self.feature = feature          # appearance value sampled from the scene
        self.coordinates = coordinates  # iterable of pixel coords (row, col[, ...])
        self.locations = set()
        # BUG FIX: was defaultdict(object), which silently returns a fresh
        # object() for any missing key; a missing location id is a programming
        # error and should raise KeyError, so use a plain dict.
        self.locationToCoordinateMap = {}
        for coord in coordinates:
            loc = uuid.uuid1()  # unique id for this pixel location
            self.locations.add(loc)
            self.locationToCoordinateMap[loc] = coord[0:2]


class ObjectInstance:
    """A concrete occurrence of a learned object description in a scene."""

    def __init__(self, objInstanceId, objDescriptionId, bbox):
        # bbox follows the regionprops convention: (min_row, min_col, max_row, max_col)
        self.id = objInstanceId
        self.objDescriptionId = objDescriptionId
        self.bbox = bbox

    def getBboxTopLeft(self):
        """Return the (min_row, min_col) top-left corner of the bounding box."""
        return self.bbox[:2]


class BodyInfo:
    """Persistable summary of the region identified as the agent's own body."""

    def __init__(self, regionInfo):
        # Only the pixel area and bounding-box area are kept; they are what
        # findBodyInTheScene uses to re-identify the body in later frames.
        self.area, self.area_bbox = regionInfo.area, regionInfo.area_bbox


class MemoryInfo:
    """Placeholder for the agent's memory bookkeeping; no state is stored yet."""

    def __init__(self):
        # Intentionally empty — reserved for future memory-related attributes.
        pass


class HighLevelModel:
    """One observed transition in the coarse (direction, speed, action) space:
    motion state before an action, the action, and the motion state after."""

    def __init__(self):
        # before*: motion state prior to the action; after*: state following it.
        self.beforeDirection = None
        self.beforeSpeedDegree = None
        self.action = None
        self.afterDirection = None
        self.afterSpeedDegree = None

    def toStr(self):
        """Serialize the transition as an underscore-joined key string."""
        fields = (self.beforeDirection, self.beforeSpeedDegree, self.action,
                  self.afterDirection, self.afterSpeedDegree)
        return "_".join(str(f) for f in fields)


class Agent:
    """Agent that discovers its own body in a segmented scene, learns a coarse
    (direction, speed, action) transition model of its motion, and uses it to
    steer toward a fixed target position."""

    def __init__(self, worldDim):
        """worldDim: (height, width) of the observed world in pixels."""
        self.worldHeight = worldDim[0]
        self.worldWidth = worldDim[1]
        self.learnedObjectDescriptions = []  # need to be persisted
        self.matchedObjInst = []
        self.maxODID = 0  # next ObjectDescription id to hand out
        self.maxObjInstId = 0  # next ObjectInstance id to hand out
        self.maxCauseEffectId = 0  # next CauseEffectPosition id to hand out
        self.actionSpace = [0, 1, 2, 3]
        self.learnedCauseEffects = []
        self.temporalMemory = []
        self.constructedObjects = []
        # CLEANUP: previousScene/currentScene were assigned twice in the
        # original ([] first, then None); keep the effective final value, None.
        self.previousScene = None
        self.currentScene = None
        self.memoryInfo = None
        self.bodyInfo = None  # set once the single moving region is identified as the body
        self.iterNum = 0
        self.predictionRate = 0
        self.highLevelModels = []  # learned (before, action, after) transitions
        self.highLevelModelKeys = set()  # string keys for O(1) duplicate checks
        self.previousPosition = None
        self.previousVelocity = None
        self.previousAction = None
        self.previousMaxSpeed = None
        self.maxSpeed = 0  # largest body speed seen so far; scales the speed degrees
        self.targetPosition = (304, 243)  # goal centroid in (row, col)
        self.speedDegreeNum = 10  # speed is discretised into this many degrees
        self.directionDegreeNum = 1  # direction bucket width in degrees
        self.topPercentage = 20  # % of models kept as "top" candidates in handle()

    def getHighLeveSpaceInfoByVelocity(self, velocityInfo):
        """Map a 2-D velocity vector to a (direction, speedDegree) pair.

        Speed degrees are relative to the largest speed seen so far; when a
        new maximum appears, previously stored models are rescaled so their
        degrees stay comparable.
        """
        direction = self.calDirection(velocityInfo)
        speed = np.sqrt(np.sum(velocityInfo ** 2))
        if speed > self.maxSpeed:
            previousMaxSpeed = self.maxSpeed
            self.maxSpeed = speed
            if previousMaxSpeed != 0:
                self.changeAllSpeedDegree(previousMaxSpeed)
        if self.maxSpeed == 0:
            currentSpeedDegree = 0
        else:
            currentSpeedDegree = int(np.round(speed / self.maxSpeed * self.speedDegreeNum))
        return direction, currentSpeedDegree

    def changeAllSpeedDegree(self, previousMaxSpeed):
        """Rescale every stored model's speed degrees after maxSpeed grew."""
        for hlm in self.highLevelModels:
            hlm.beforeSpeedDegree = int(np.round(hlm.beforeSpeedDegree * previousMaxSpeed / self.maxSpeed))
            hlm.afterSpeedDegree = int(np.round(hlm.afterSpeedDegree * previousMaxSpeed / self.maxSpeed))

    def calDirection(self, velocityInfo):
        """Return the discretised direction of a (row, col) velocity vector:
        degrees in [-180, 180] divided into directionDegreeNum-wide buckets."""
        angle1 = math.atan2(velocityInfo[1], velocityInfo[0])
        angle1 = int(angle1 * 180/math.pi)
        directionDegree = int(np.round(angle1 / self.directionDegreeNum))
        return directionDegree

    def handle(self, observation):
        """Process one observation frame and return the next action.

        Returns None while bootstrapping (no action issued), -1 once the
        target is reached (models are persisted), otherwise an action id.
        """
        scene = Agent.getRegionsInTheScene(observation)
        if self.previousScene is None:
            # first frame: nothing to compare against yet
            self.previousScene = scene
            return
        self.currentScene = scene
        # find your body first: in the scene, only your body can move
        if self.bodyInfo is None:
            movedRegions = self.detectMotionBetweenTwoScene()
            if len(movedRegions) == 1:  # found motions, mark the moved region as body
                self.bodyInfo = BodyInfo(movedRegions[0])
                torch.save(self.bodyInfo, 'savedModels/bodyInfo.m')
                return
            else:
                # ambiguous or no motion: act randomly to provoke movement
                return self.chooseRandomAction()
        self.iterNum += 1
        bodyInTheScene = self.findBodyInTheScene(scene)
        if bodyInTheScene is None:
            # lost track of the body; reset the motion state and explore
            self.previousPosition = None
            self.previousVelocity = None
            return self.chooseRandomAction()
        currentPosition = bodyInTheScene.centroid
        if self.previousPosition is None:
            self.previousPosition = currentPosition
            return self.chooseRandomAction()
        if self.previousVelocity is None:
            self.previousVelocity = np.array(currentPosition) - np.array(self.previousPosition)
            act = self.chooseRandomAction()
            self.previousPosition = currentPosition
            self.previousAction = act
            return act
        # record the observed (before-state, action, after-state) transition
        beforeDirection, beforeSpeedDegree = self.getHighLeveSpaceInfoByVelocity(self.previousVelocity)
        currentVelocity = np.array(currentPosition) - np.array(self.previousPosition)
        currentDirection, currentSpeedDegree = self.getHighLeveSpaceInfoByVelocity(currentVelocity)
        highLevelModel = HighLevelModel()
        highLevelModel.beforeDirection = beforeDirection
        highLevelModel.beforeSpeedDegree = beforeSpeedDegree
        highLevelModel.action = self.previousAction
        highLevelModel.afterDirection = currentDirection
        highLevelModel.afterSpeedDegree = currentSpeedDegree
        theKey = highLevelModel.toStr()
        if theKey not in self.highLevelModelKeys:
            self.highLevelModels.append(highLevelModel)
            self.highLevelModelKeys.add(theKey)
        # prepare for next round
        self.previousVelocity = currentVelocity
        self.previousPosition = currentPosition
        # check if reached the target:
        targetDistance = np.array(currentPosition) - np.array(self.targetPosition)
        targetDistance = np.sqrt(np.sum(targetDistance ** 2))
        if targetDistance <= 10:
            torch.save(self.highLevelModels, 'savedModels/highLevelModels.m')
            torch.save(self.highLevelModelKeys, 'savedModels/highLevelModelKeys.m')
            return -1
        else:  # use the knowledge about action to perform some action:
            neededDirection = self.calDirection(np.array(self.targetPosition) - np.array(currentPosition))
            # BUG FIX: the direction differences were computed but never used;
            # sort the candidates by how closely their before-direction matches
            # the current direction before taking the top slice.
            beforeDirectionView = []
            for hlm in self.highLevelModels:
                beforeDirectionDiff = np.abs(hlm.beforeDirection - currentDirection)
                beforeDirectionView.append((beforeDirectionDiff, hlm))
            beforeDirectionView.sort(key=lambda pair: pair[0])
            topHlmNum = int(np.round(len(self.highLevelModels) * self.topPercentage / 100))
            topHlm = [hlm for _, hlm in beforeDirectionView[0:topHlmNum]]
            # NOTE(review): selecting a model from topHlm (e.g. matching
            # neededDirection) is unfinished; matchedHlm is never set, so the
            # agent currently always falls back to a random action.
            matchedHlm = None
            if matchedHlm is None:
                act = self.chooseRandomAction()
            else:
                print('matched')
                act = matchedHlm.action
            self.previousAction = act
            return act

    def detectMotionBetweenTwoScene(self):
        """Return the current-scene regions whose bounding box moved since the
        previous scene.  Regions are matched between scenes by pixel area."""
        movedRegions = []
        for cr in self.currentScene:
            pr = Agent.findRegionByArea(cr.area, self.previousScene)
            if pr is not None:  # found a region with the same area
                # BUG FIX: the original tested np.sum(pbbox - cbbox) != 0,
                # which misses displacements whose coordinate deltas cancel
                # out; compare element-wise instead.
                if np.any(np.array(pr.bbox) != np.array(cr.bbox)):
                    movedRegions.append(cr)
        return movedRegions

    @staticmethod
    def findRegionByArea(area, regions):
        """Return the first region with exactly this pixel area, or None."""
        for r in regions:
            if area == r.area:
                return r
        return None

    @staticmethod
    def findRegionByBbox(bbox, regions):
        """Return the first region with exactly this bounding box, or None."""
        for r in regions:
            if bbox == r.bbox:
                return r
        return None

    def findBodyBboxInTheTemporalMemory(self, temporalMemory):
        """Collect the body's bbox (as a tuple) from each remembered scene.

        ROBUSTNESS FIX: scenes in which the body cannot be found are skipped
        (the original raised AttributeError on the None return).
        """
        result = set()
        for tm in temporalMemory:
            body = self.findBodyInTheScene(tm)
            if body is not None:
                result.add(tuple(body.bbox))
        return result

    def findBodyInTheScene(self, theScene):
        """Find the region matching the stored body info, first by pixel area
        and then by bounding-box area; return None when neither matches."""
        for r in theScene:
            if r.area == self.bodyInfo.area:
                return r
        for r in theScene:
            if r.area_bbox == self.bodyInfo.area_bbox:
                return r
        return None

    def chooseRandomAction(self):
        """Return a uniformly random action id (index into actionSpace;
        identical to the action itself since actionSpace is [0, 1, 2, 3])."""
        action = random.randint(0, len(self.actionSpace) - 1)
        return action

    @staticmethod
    def getRegionsInTheScene(theScene):
        """Segment the scene image into labelled connected regions."""
        encodedImageT = utils.encodeImageTorch(theScene)
        encodedImage = encodedImageT.cpu().numpy()
        label = measure.label(encodedImage, connectivity=2)
        props = measure.regionprops(label)
        return props

    def createCurrentScene(self, theScene):
        """Segment theScene into regions, match each against the learned
        object descriptions (learning new ones as needed), and rebuild
        currentScene as a list of ObjectInstance objects."""
        self.previousScene = self.currentScene
        self.currentScene = []
        encodedImageT = utils.encodeImageTorch(theScene).unsqueeze(dim=2)
        encodedImage = encodedImageT.numpy()
        label = measure.label(encodedImage, connectivity=2)
        props = measure.regionprops(label)
        # matching regions with learned objects
        for pp in props:
            firstCoord = pp.coords[0][0:2]
            feature = theScene[firstCoord[0], firstCoord[1]]
            matchedDescription = None
            for objDescrip in self.learnedObjectDescriptions:
                if self.matchDescription(pp.coords, feature, objDescrip):
                    matchedDescription = objDescrip
                    break
            if matchedDescription is None:
                # no matching (or no) description yet: learn a new one.
                # (This also covers the original's redundant special case for
                # an empty description list.)
                matchedDescription = ObjectDescription(self.maxODID, pp.coords, feature)
                self.maxODID += 1
                self.learnedObjectDescriptions.append(matchedDescription)
            # BUG FIX: the matched branch originally drew the instance id from
            # maxODID; instances must use maxObjInstId like every other
            # creation path, or the two id spaces collide.
            objInst = ObjectInstance(self.maxObjInstId, matchedDescription.id, pp.bbox)
            self.maxObjInstId += 1
            self.currentScene.append(objInst)

    def matchObjectBetweenScene(self):
        """Carry object-instance ids from the previous scene into the current
        one: match first by identical position, then by position change."""
        for objInstPrevious in self.previousScene:
            matchingObjInst = Agent.findNoChangesInTheScene(objInstPrevious, self.currentScene)
            if matchingObjInst:
                matchingObjInst.id = objInstPrevious.id
            else:  # no object stayed in the old place; check for a position change
                positionChangedObjInst = Agent.findPositionChanges(objInstPrevious, self.currentScene)
                # ROBUSTNESS FIX: the object may have left the scene entirely,
                # in which case findPositionChanges returns None.
                if positionChangedObjInst is not None:
                    positionChangedObjInst.id = objInstPrevious.id

    def learnCauseEffect(self):
        """Record a position-delta causal rule for the previous action when an
        object moved between scenes (at most one rule per atomic action)."""
        # ROBUSTNESS FIX: previousScene may still be None before any frame was
        # processed; `not` covers both None and the empty list.
        if self.previousAction is None or not self.previousScene:
            return
        for currenObjInst in self.currentScene:
            previousObjInst = self.findObjInstById(currenObjInst.id, self.previousScene)
            if previousObjInst is None:
                continue  # newly appeared object: no before-state to compare
            currentBbox = currenObjInst.bbox
            previousBbox = previousObjInst.bbox
            if currentBbox != previousBbox:
                existingCEP = self.getCauseEffectPosition(self.previousAction)
                if not existingCEP:
                    positionDelta = (np.array(currentBbox) - np.array(previousBbox))[0:2]
                    newcep = CauseEffectPosition(self.maxCauseEffectId, self.previousAction, currenObjInst.id, positionDelta)
                    self.maxCauseEffectId += 1
                    self.learnedCauseEffects.append(newcep)

    def getCauseEffectPosition(self, actionId):
        """Return the causal rule learned for actionId, or None."""
        for cep in self.learnedCauseEffects:
            if cep.actionId == actionId:  # each atomic action has only one causal rule
                return cep
        return None

    @staticmethod
    def findObjInstById(objInstId, objInstList):
        """Return the object instance with the given id, or None."""
        for objInst in objInstList:
            if objInst.id == objInstId:
                return objInst
        return None

    @staticmethod
    def findNoChangesInTheScene(objInst, theScene):
        """Return a scene instance with the same description AND the same
        bbox (i.e. the object did not move), or None."""
        for objInstInTheScene in theScene:
            if objInst.objDescriptionId == objInstInTheScene.objDescriptionId and objInst.bbox == objInstInTheScene.bbox:
                return objInstInTheScene
        return None

    @staticmethod
    def findPositionChanges(objInst, theScene):
        """Return a scene instance with the same description but a DIFFERENT
        bbox (i.e. the object moved), or None."""
        for objInstInTheScene in theScene:
            if objInst.objDescriptionId == objInstInTheScene.objDescriptionId and objInst.bbox != objInstInTheScene.bbox:
                return objInstInTheScene
        return None

    def matchDescription(self, ppCoord, ppFeature, objectDescription):
        """True when the region has the same appearance feature, the same
        pixel count, and is a rigid translation of the description."""
        if utils.encodeRGB(objectDescription.feature) != utils.encodeRGB(ppFeature):
            return False
        if ppCoord.shape[0] != objectDescription.coordinates.shape[0]:
            return False
        # every pixel must be displaced by the same offset (pure translation)
        displacement = ppCoord - objectDescription.coordinates
        firstDiff = utils.encodeCoord(displacement[0], self.worldWidth)
        for d in displacement:
            dEncoded = utils.encodeCoord(d, self.worldWidth)
            if firstDiff != dEncoded:
                return False
        return True

    def getAction(self, targetObjInstId, targetBboxTopLeft):
        """Act randomly until every action has a learned causal rule, then plan."""
        if len(self.learnedCauseEffects) < len(self.actionSpace):
            return self.chooseRandomAction()
        else:
            return self.plan(targetObjInstId, targetBboxTopLeft)

    def plan(self, targetObjInstId, targetBboxTopLeft):
        """Pick an action whose learned position delta moves the target object
        closer (in Manhattan distance) to targetBboxTopLeft."""
        targetObjInst = self.findObjInstById(targetObjInstId, self.currentScene)
        currBboxTopLeft = targetObjInst.getBboxTopLeft()
        currManhattan = utils.manhattan(targetBboxTopLeft, currBboxTopLeft)
        availableAction = []
        # strategy one: try out actions in the mind
        for act in self.actionSpace:
            cep = self.findCauseEffectByActionId(act)
            updatedBboxTopLeft = currBboxTopLeft + cep.positionDelta
            updatedManhattan = utils.manhattan(targetBboxTopLeft, updatedBboxTopLeft)
            if updatedManhattan < currManhattan:
                availableAction.append(act)
        # ROBUSTNESS FIX: when no action improves the distance, fall back to a
        # random action (the original crashed in randint(0, -1)).
        if not availableAction:
            return self.chooseRandomAction()
        return random.choice(availableAction)

    def checkTargetFininsed(self, targetObjInstId, targetBboxTopLeft):
        """True when the target object's bbox top-left equals the goal.

        (The typo in the method name is kept for backward compatibility.)
        """
        targetObjInst = self.findObjInstById(targetObjInstId, self.currentScene)
        currBboxTopLeft = targetObjInst.getBboxTopLeft()
        currManhattan = utils.manhattan(targetBboxTopLeft, currBboxTopLeft)
        if currManhattan == 0:
            print('reached target, no action')
            return True
        else:
            return False

    def findCauseEffectByActionId(self, actionId):
        """Alias of getCauseEffectPosition, kept for callers using this name."""
        return self.getCauseEffectPosition(actionId)

    @staticmethod
    def printScene(theScene):
        """Log a one-line summary of every object instance in the scene."""
        print('there are', len(theScene), 'objects in the scene')
        for ith, o in enumerate(theScene):
            print(str(ith) + ' object instance Id is ', o.id, '; object description Id is', o.objDescriptionId)
