#!/usr/bin/env python
# coding: utf-8

import warnings
warnings.filterwarnings("ignore")
import sys
import numpy as np
import pandas as pd
from minepy import MINE
from itertools import combinations
from sklearn.neighbors import KNeighborsClassifier

class Feature:
    """A candidate feature selected by McOne.

    Pairs a feature's 1-based column index with its MIC score against
    the class label.
    """

    def __init__(self, ID, minFC):
        # Keep the original public attribute names so external code
        # relying on them keeps working.
        self.ID = ID          # 1-based feature (column) index
        self.minFC = minFC    # MIC(feature, label) score

    def getID(self):
        """Return the 1-based feature index."""
        return self.ID

    def getMinFC(self):
        """Return the MIC score between this feature and the label."""
        return self.minFC


def get_data(dataPath):
    """Load a tab-separated expression matrix and binarize its class labels.

    Expected file layout (before transposition): the first row holds a
    placeholder cell followed by one class label per sample; every later
    row is one feature (a feature name followed by its value for each
    sample).

    Parameters
    ----------
    dataPath : str
        Path to the tab-separated data file.

    Returns
    -------
    data : pandas.DataFrame
        Samples as rows, features as columns (feature-name column dropped).
    label : numpy.ndarray of int
        0/1 labels; the lexicographically smaller of the two original
        label strings maps to 0, everything else to 1 (np.unique sorts).
    """
    data = pd.read_csv(dataPath, sep="\t", header=None)
    data = data.T                 # samples become rows, features become columns
    data.drop(0, inplace=True)    # drop the feature-name row (original first column)
    label = data[0]               # first transposed column = the sample labels
    del data[0]

    label = np.array(label)
    labelList = np.unique(label)
    # Vectorized replacement of the original element-wise loop:
    # labelList[0] -> 0, anything else -> 1.
    label = np.where(label == labelList[0], 0, 1).astype('int')

    return data, label


#dataPath = sys.argv[1]
#data, label = get_data(dataPath)
#data = np.array(data)

# python implementation of the pseudocode in the paper
def McOne(data, label, r):
    n_features = data.shape[1]
    Subset_featureList = []
    mine = MINE(alpha=0.6, c=15)
    for i in range(1, n_features + 1):
        mine.compute_score(data[:, i - 1], label)
        if mine.mic() >= r:
            Subset_featureList.append(Feature(i, mine.mic()))

    numSubset = len(Subset_featureList)
    # then sort the list in descending order by minFC
    Subset_featureList.sort(key=lambda Feature: Feature.getMinFC(),
                            reverse=True)

    e = 1
    while e <= numSubset:
        q = e + 1
        while q <= numSubset:
            mine.compute_score(data[:, e - 1], data[:, q - 1])
            if mine.mic() >= Subset_featureList[q - 1].getMinFC():
                del Subset_featureList[q - 1]
                numSubset = numSubset - 1
            else:
                q = q + 1
        e = e + 1

    return Subset_featureList


#Subset = McOne(data=data, label=label, r=0.2)
#Subset = [feature.getID() for feature in Subset]  # we won't use minFC any more
#Subset = np.array(Subset)


# Step 2 of the McTwo algorithm is a graph-search (best-first search) algorithm.
# A pseudocode listing appears in this paper: [https://alex.smola.org/workshops/sigir10/contrib/Dang.pdf]
# The following is my implementation of that best-first search algorithm.
class Node:
    """A vertex in the feature-subset search graph.

    Each node stands for one candidate feature subset.  The two boolean
    flags (``inNodes``, ``inPriority``) are membership markers used by
    the best-first search so a node is not expanded or queued after it
    has been consumed.
    """

    def __init__(self, size, featureList):
        self.size = size                # number of features in this subset
        self.featureList = featureList  # the feature IDs themselves
        self.adjacentList = []          # neighbouring nodes in the lattice
        self.performance = 0            # cached classifier score for this subset
        self.inNodes = True             # still part of the unexplored graph?
        self.inPriority = True          # still eligible for the open list?

    # --- graph structure ---------------------------------------------
    def appendAdjacent(self, node):
        self.adjacentList.append(node)

    def clearAdjacent(self):
        self.adjacentList = []

    def getAdjacentList(self):
        return self.adjacentList

    # --- simple accessors --------------------------------------------
    def getSize(self):
        return self.size

    def getFeatureList(self):
        return self.featureList

    def getPerformance(self):
        return self.performance

    def setPerformance(self, p):
        self.performance = p

    # --- membership flags --------------------------------------------
    def isInNodes(self):
        return self.inNodes

    def outOfNodes(self):
        self.inNodes = False

    def isInPriority(self):
        return self.inPriority

    def outOfPriority(self):
        self.inPriority = False


def getConbination(List, n):  # an auxiliary method
    """Return all n-element combinations of List as a list of tuples.

    (Misspelled name kept as-is because other functions in this file
    call it.)
    """
    # combinations() already yields tuples; list() replaces the manual
    # append loop.
    return list(combinations(List, n))


def buildInitialGraph(featureSet, upperBoundFeatures):
    """Build the undirected subset-lattice graph for best-first search.

    Every subset of featureSet with 1..upperBoundFeatures elements
    becomes one Node; two nodes are adjacent when the smaller node's
    feature set is a proper subset of the larger's and their sizes
    differ by exactly one.

    Returns
    -------
    nodes : list of Node -- flat list of all nodes.
    nodeLevelList : list of list of Node -- the same nodes grouped by
        subset size (level i holds the subsets of size i+1).
    """
    nodeLevel = []
    for i in range(1, upperBoundFeatures + 1):
        nodeLevel.append(getConbination(featureSet, i))

    n_levels = upperBoundFeatures
    nodes = []
    nodeLevelList = [[] for i in range(n_levels)]
    for i in range(0, n_levels):
        for featureVector in nodeLevel[i]:
            # BUG FIX: create ONE Node and share it between `nodes` and
            # `nodeLevelList`.  The original constructed two distinct
            # Node objects per subset, so the adjacency links (set below
            # on the nodeLevelList copies) were invisible to the nodes
            # the search actually iterates over.
            node = Node(i + 1, featureVector)
            nodes.append(node)
            nodeLevelList[i].append(node)

    # Link consecutive levels: size-k node <-> size-(k+1) node whenever
    # the smaller feature set is strictly contained in the larger one.
    for i in range(0, n_levels - 1):
        j = i + 1
        for node1 in nodeLevelList[i]:
            for node2 in nodeLevelList[j]:
                if set(node1.getFeatureList()) < set(node2.getFeatureList()):
                    node1.appendAdjacent(node2)
                    node2.appendAdjacent(node1)

    return nodes, nodeLevelList


def computePerformance(data, label, node):   # an auxiliary method
    """Score a node's feature subset with 3-NN accuracy.

    NOTE(review): the classifier is fit and scored on the same data, so
    this is resubstitution (training) accuracy, not cross-validated
    accuracy -- kept as in the original.
    """
    features = np.array(node.getFeatureList())
    # BUG FIX: feature IDs produced by McOne are 1-based (Feature(i, ...)
    # corresponds to column i-1), so shift to 0-based before indexing.
    # Without this, every column is off by one and the largest ID reads
    # past the end of the matrix.
    X = data[:, features - 1]
    y = label
    neigh = KNeighborsClassifier(n_neighbors=3)
    neigh.fit(X, y)
    mAccurancy = neigh.score(X, y)

    return mAccurancy


def Argmax(List):	# an auxiliary method
    """Return the element of List with the highest getPerformance().

    Uses max() instead of the original in-place descending sort: the
    result is the same (ties go to the earliest maximal element both for
    a stable reverse sort and for max()), but it runs in O(n) and no
    longer mutates the caller's list as a side effect.
    """
    return max(List, key=lambda node: node.getPerformance())


def bestFirstSearchProcedure(data, label, nodes, nodeLevelList, best):
    """One round of best-first search over the feature-subset graph.

    Seeds the open list P with nodes[0], then repeatedly expands the
    highest-performance node in P, scoring its neighbours with
    computePerformance.  Stops after 5 consecutive expansions that fail
    to beat the incumbent ``best`` (or when P empties).

    ``nodes`` (the not-yet-expanded node list) is mutated in place;
    ``nodeLevelList`` is accepted but not used in this function.
    Returns (best, nodes) so the caller can continue with what is left.
    """
    P = []
    v = nodes[0]
    v.setPerformance(computePerformance(data, label, v))
    P.append(v)
    count = 0  # consecutive expansions that did not improve on `best`
    while len(P) > 0:
        v = Argmax(P)  # pop the most promising open node
        P.remove(v)
        v.outOfPriority()
        if v in nodes:
            nodes.remove(v) # the only place where nodes changes in this method
        v.outOfNodes()  # remove the node from nodes
        if v.getPerformance() > best.getPerformance():
            best = v
        else:
            count = count + 1
            if count >= 5:
                # Stale limit reached: give up on this round early.
                return best, nodes
        # Queue unexplored, not-yet-popped neighbours.
        # NOTE(review): inPriority is cleared only on pop, not on append,
        # so a node can enter P more than once before its first pop --
        # redundant re-evaluation, but not incorrect.
        for adjacentNode in v.getAdjacentList():
            if adjacentNode.isInNodes() and adjacentNode.isInPriority():
                adjacentNode.setPerformance(
                    computePerformance(data, label, adjacentNode))
                P.append(adjacentNode)

        if len(nodes) == 0:  # may not necessary, I think
            break

    return best, nodes


def rebuildGraph(nodes):   # rebuild an undirected graph
    """Recompute the level structure and adjacency of the surviving nodes.

    Discards every node's old adjacency list, re-buckets the nodes by
    subset size, and re-creates edges between consecutive size levels
    wherever one node's feature set is a proper subset of the other's.
    The nodes are mutated in place, so both the list and the new level
    structure are returned.
    """
    # The largest subset size still present decides how many levels exist.
    sizes = [node.getSize() for node in nodes]
    n_levels = np.unique(sizes)[-1]

    nodeLevelList = [[] for _ in range(n_levels)]
    for node in nodes:
        node.clearAdjacent()  # edges are rebuilt from scratch below
        nodeLevelList[node.getSize() - 1].append(node)

    # Re-link adjacent levels (size k vs size k+1).
    for lower, upper in zip(nodeLevelList, nodeLevelList[1:]):
        for small in lower:
            for big in upper:
                if set(small.getFeatureList()) < set(big.getFeatureList()):
                    small.appendAdjacent(big)
                    big.appendAdjacent(small)

    return nodes, nodeLevelList


def stepTwo(data, label, Subset, upperBoundFeatures):
    """Step 2 of McTwo: best-first search over subsets of McOne features.

    Builds the subset lattice once, then alternates best-first-search
    rounds with graph rebuilds until every node has been consumed.
    Returns the best-performing Node found (starts from an empty dummy
    node with performance 0).
    """
    nodes, nodeLevelList = buildInitialGraph(Subset, upperBoundFeatures)
    best = Node(1, [])
    while nodes:
        best, nodes = bestFirstSearchProcedure(
            data, label, nodes, nodeLevelList, best)
        if not nodes:
            break
        nodes, nodeLevelList = rebuildGraph(nodes)

    return best


#if len(Subset) > 7:
#    upperBoundFeatures = 7
#else:
#    upperBoundFeatures = len(Subset)
#featureNode = stepTwo(data, label, Subset, upperBoundFeatures)	# Subset is the output of McOne
#print("features: ",featureNode.getFeatureList()) 

# because this algorithm is implemented in Python directly, it may be very time-consuming
# and I'm not sure whether it will produce a correct result


def McTwo(data,label):
    """Run the full McTwo pipeline: McOne filtering, then best-first search.

    Parameters
    ----------
    data : array-like, shape (n_samples, n_features)
        Expression matrix.  Converted to a numpy array internally, so a
        pandas DataFrame (e.g. from get_data) is accepted as well.
    label : numpy.ndarray
        0/1 class labels.

    Returns
    -------
    The selected feature IDs (1-based, as produced by McOne).
    """
    # Robustness fix: McOne indexes with data[:, i], which requires an
    # ndarray.  The commented-out driver code in this file performed the
    # np.array conversion before calling McOne, but this function did not.
    data = np.asarray(data)
    Subset = McOne(data=data, label=label, r=0.2)
    # Only the feature IDs are needed from here on, not the MIC scores.
    Subset = np.array([feature.getID() for feature in Subset])
    # Cap the subset size at 7 (same hard cap as the original script) to
    # keep the exponential subset search tractable.
    upperBoundFeatures = min(len(Subset), 7)
    featureNode = stepTwo(data, label, Subset, upperBoundFeatures)
    return featureNode.getFeatureList()