import pydot
from copy import copy, deepcopy
import random
random.seed(3)

def drop(lst, index):
    """Return a copy of a sequence with the element at a given index removed.

    Note: the parameter was renamed from ``list`` to ``lst`` so it no longer
    shadows the ``list`` builtin; all in-file call sites are positional.

    Keyword arguments:
        lst -- a list (or any sliceable sequence supporting +).
        index -- index of the element to be dropped.

    """
    return lst[:index] + lst[index+1:]

class WrongNumberOfInstantiations(ValueError):
    """Raised when the number of supplied variable instantiations does not
    match the number expected by a probability table."""

class InstantiationsNotFoundError(ValueError):
    """Raised when a given variable instantiation does not match any line of
    a probability table."""

class PT:
    
    """Class that stores a probability table for a list of variables.
    
    Each line of the table holds one boolean instantiation of the variables
    followed by its probability: [True, False, ..., prob].
    """
    
    def __init__(self, nodes, probabilityTable):
        """Initialize the PT class.
        
        Keyword arguments:
        nodes -- list of strings representing node names
        probabilityTable -- list of lines representing an instantiation of nodes
            and its probability of form [[True, False,..., prob], ...]
        
        """
        self.nodes = nodes
        self.probabilityTable = probabilityTable
        
    def __str__(self):
        """Return a string representation of a PT class instance: the node
        names on the first line, then one tab-separated table line per row."""
        s = str(self.nodes) + '\n'
        for l in self.probabilityTable:
            for e in l[:-1]:
                s += "%r\t" % e
            s += "%f\n" % l[-1]
        return s
    
    def normalize(self):
        """Normalize the probabilities in the table in place by summing them
        up and dividing each with the sum."""
        s = sum(l[-1] for l in self.probabilityTable)
        for l in self.probabilityTable:
            l[-1] /= s
    
    def sample(self, parent_instantiations):
        """Sample a child value conditional on the parent values.
        Return a boolean representing the child value.
        
        Note: This function assumes that:
        - the child node is the last node in the self.nodes list
        - the probabilities in the PT for the same parent values sum up to 1
        
        Keyword arguments:
        parent_instantiations -- dictionary containing instantiated nodes of the
            form: {"node": node_value, ...}
        
        Raises:
        WrongNumberOfInstantiations -- if the number of given parent values
            does not match the number of parents in the table.
        InstantiationsNotFoundError -- if no table line matches the given
            parent values.
        
        """
        # check if we have instantiations for all the parents
        # (each line holds the parent values, the child value and the prob,
        # hence len(line) - 2 parents)
        if len(self.probabilityTable[0][:-2]) != len(parent_instantiations):
            raise WrongNumberOfInstantiations("The length of " \
                "parent_instantiations list should be {0} instead of {1}".format(
                len(self.probabilityTable[0][:-2]), len(parent_instantiations)))
        # extract the probability of the child being true
        for l in self.probabilityTable:
            for node, node_value_PT in zip(self.nodes[:-1], l[:-2]):
                if node_value_PT != parent_instantiations[node]:
                    # continue to the next line in the probabilityTable
                    break
            else:
                # we found the line that matches parent_instantiations;
                # if the line lists the child as False, the probability of
                # True is the complement
                probTrue = l[-1] if l[-2] else 1 - l[-1]
                # sample according to the probability of the child
                return random.random() < probTrue
        raise InstantiationsNotFoundError("The parent instantiations:" \
                " {0} not found in the PT.".format(parent_instantiations))
    
    def get_probability(self, node_instantiations):
        """Return the probability from the PT given instantiations of all the
        nodes.
        
        Keyword arguments:
        node_instantiations -- dictionary containing instantiated nodes of the
            form: {"node": node_value, ...}
        
        Raises:
        WrongNumberOfInstantiations -- if the number of given values does not
            match the number of variables in the table.
        InstantiationsNotFoundError -- if no table line matches the given
            values.
        
        """
        # check if we have instantiations for all the nodes
        if len(self.probabilityTable[0][:-1]) != len(node_instantiations):
            raise WrongNumberOfInstantiations("The length of " \
                "node_instantiations list should be {0} instead of {1}".format(
                len(self.probabilityTable[0][:-1]), len(node_instantiations)))
        # extract the probability of node_instantiations
        for l in self.probabilityTable:
            for node, node_value_PT in zip(self.nodes, l[:-1]):
                if node_value_PT != node_instantiations[node]:
                    # continue to the next line in the probabilityTable
                    break
            else:
                # we found the line that matches node_instantiations
                return l[-1]
        raise InstantiationsNotFoundError("The node instantiations:" \
                " {0} not found in the PT.".format(node_instantiations))

def multiplyPTs(PTs):
    """Multiply a set of probability tables by matching them by their common
    variables. Return the product probability table.
    
    Keyword arguments:
    PTs -- non-empty iterable of probability tables.
        
    """
    def mul(pt1, pt2):
        """Multiply two probability tables by matching them by their common 
        variables.
        
        Keyword arguments:
        pt1 -- first probability table.
        pt2 -- second probability table.
        
        """
        # identify the common variables
        commonVars = set(pt1.nodes) & set(pt2.nodes)
        # map each variable to its column index in the respective table
        i1 = dict((node, i) for i, node in enumerate(pt1.nodes))
        i2 = dict((node, i) for i, node in enumerate(pt2.nodes))
        
        # the product table ranges over the union of the variables
        pt = PT(pt1.nodes + [var for var in pt2.nodes if var not in commonVars],
                [])
        
        # combine every pair of lines that agrees on all common variables
        # (all() on an empty sequence is True, so tables without common
        # variables produce the full cross product)
        for line1 in pt1.probabilityTable:
            for line2 in pt2.probabilityTable:
                if all(line1[i1[var]] == line2[i2[var]] for var in commonVars):
                    pt.probabilityTable.append(
                        line1[:-1] +
                        [line2[i] for i in range(len(line2) - 1)
                         if pt2.nodes[i] not in commonVars] +
                        [line1[-1] * line2[-1]])
        return pt
    
    # left fold of mul over the tables (works for any non-empty iterable)
    tables = iter(PTs)
    result = next(tables)
    for table in tables:
        result = mul(result, table)
    return result

def sumOutVar(var, pt):
    """Sum out (marginalize away) a variable from a probability table.
    Return a new probability table without var; the input table is not
    modified.
    
    Keyword arguments:
    var -- string containing variable name.
    pt -- probability table containing var
        
    """
    varI = pt.nodes.index(var)
    newPt = PT(drop(pt.nodes, varI), [])
    # work on a shallow copy so pt.probabilityTable keeps its lines
    probs = list(pt.probabilityTable)

    while probs:
        # take the first remaining line with the summed-out column dropped
        tempLine = drop(probs.pop(0), varI)
        
        # accumulate the probabilities of all lines matching tempLine's
        # instantiation and keep only the non-matching lines for the next
        # round.  (The previous implementation collected ascending indices
        # and popped them one by one, which shifts the positions of the
        # later indices and removes the wrong lines whenever more than one
        # line matches.)
        remaining = []
        for line in probs:
            if drop(line, varI)[:-1] == tempLine[:-1]:
                tempLine[-1] += line[-1]
            else:
                remaining.append(line)
        probs = remaining
        
        newPt.probabilityTable.append(tempLine)
    return newPt

class BN:
    
    """Class describing a bayesian network."""
    
    def __init__(self, nodes, probabilities):
        """Initialize the BN class.
        
        Keyword arguments:
        nodes -- list of strings representing node names.
        probabilities -- a dictionary of probability tables for the nodes of 
        form: {node: PT([parents], prob_table)}
        
        """
        self.nodes = nodes
        self.probabilities = probabilities
    
    def parents(self, node):
        """Return the parents of a given node.
        
        Keyword arguments:
        node -- string containing node name.
        
        """
        # a node's PT lists its parents plus the node itself
        return [parent for parent in self.probabilities[node].nodes
                if parent != node]
    
    def to_dot(self):
        """Convert the network into a dot graph and return it."""
        g = pydot.Dot()
        
        # one directed edge per (parent, child) pair
        for child in self.nodes:
            for parent in self.probabilities[child].nodes:
                if parent != child:
                    g.add_edge(pydot.Edge(parent, child))
        return g
    
    def to_png(self, filename):
        """Save the network as a Graphviz generated png image.
        
        Keyword arguments:
        filename -- string containing filename without the .png extension.
        
        """
        g = self.to_dot()
        g.write_png(filename + ".png", prog='dot')
        
    def to_pdf(self, filename):
        """Save the network as a Graphviz generated pdf image.
        
        Keyword arguments:
        filename -- string containing filename without the .pdf extension.
        
        """
        g = self.to_dot()
        g.write_pdf(filename + ".pdf", prog='dot')
        
    def getInteractionGraph(self):
        """Construct the interaction graph for the network. The graph is 
        represented as a {node:[neighbour_nodes]} dictionary."""
        interactionGraph = {}
        
        # for each node
        for node in self.nodes:
            interactionGraph[node] = []
            for otherNode in self.nodes:
                # add the node's parents to the graph
                if node == otherNode:
                    interactionGraph[node].extend(
                        [n for n in self.probabilities[node].nodes if n != node])
                # add the node's children to the graph
                elif node in self.probabilities[otherNode].nodes:
                    interactionGraph[node].append(otherNode)
        return interactionGraph
        
    def minDegreeOrder(self, ignoreList):
        """Compute variable elimination order according to the min-degree 
        heuristic. Yield the variables one by one while updating the interaction 
        graph.
        
        Keyword arguments:
        ignoreList -- variables that should not be eliminated and are not 
            included in the elimination order (usually query variables).
        
        """
        interactionGraph = self.getInteractionGraph()
        
        while len(interactionGraph) > len(ignoreList):
            # find the node with the smallest degree that is not in the 
            # ignoreList
            for node in sorted(interactionGraph, 
                               key=lambda x: len(interactionGraph[x])):
                if node not in ignoreList:
                    minNode = node
                    break
            
            # update the interaction graph by adding edges between neighbor 
            # nodes of the minimal node that were not previously connected
            for node in interactionGraph[minNode]:
                for otherNode in interactionGraph[minNode]:
                    if otherNode not in interactionGraph[node] and \
                       node != otherNode:
                        interactionGraph[node].append(otherNode)
            for node in interactionGraph[minNode]:
                interactionGraph[node].remove(minNode)
            
            # remove the minimal node
            interactionGraph.pop(minNode)
            
            yield minNode
            
    def minFillOrder(self, ignoreList):
        """Compute variable elimination order according to the min-fill 
        heuristic. Yield the variables one by one while updating the interaction 
        graph.
        
        Keyword arguments:
        ignoreList -- variables that should not be eliminated and are not 
            included in the elimination order (usually query variables).
        
        """
        interactionGraph = self.getInteractionGraph()
        
        while len(interactionGraph) > len(ignoreList):
            # find the node whose elimination adds the fewest fill-in edges.
            # NOTE: iterate over the remaining graph (not self.nodes) so
            # already-eliminated nodes are not looked up, and keep all the
            # bookkeeping inside the candidate check (the original version
            # updated cnt/minNode outside the guard, which crashed or reused
            # a stale count when a node was in the ignoreList).
            minNode = None
            minCnt = None
            for node in interactionGraph:
                if node in ignoreList:
                    continue
                cnt = 0
                # count all non-connected neighbor node pairs
                for n1 in interactionGraph[node]:
                    for n2 in interactionGraph[node]:
                        if n2 not in interactionGraph[n1] and n1 != n2:
                            cnt += 1
                # each pair was counted twice
                cnt //= 2
                if minCnt is None or cnt < minCnt:
                    minNode = node
                    minCnt = cnt
            
            # update the interaction graph by adding edges between neighbor 
            # nodes of the minimal node that were not previously connected
            for node in interactionGraph[minNode]:
                for otherNode in interactionGraph[minNode]:
                    if otherNode not in interactionGraph[node] and \
                       node != otherNode:
                        interactionGraph[node].append(otherNode)
            for node in interactionGraph[minNode]:
                interactionGraph[node].remove(minNode)
            
            # remove the minimal node 
            interactionGraph.pop(minNode)
            
            yield minNode
    
    def variableElimination(self, variables, evidence=None, order=None):
        """Perform inference using the variable elimination algorithm.
        Return a PT over variables: P(variables) or, when evidence is given,
        P(variables | evidence).
        
        Keyword arguments:
        variables -- list of strings containing names of variables the 
            probability of which we would like to infer
        evidence -- dictionary containing the instantiation of evidence 
            variables of form: {"var1":True,...}. If not specified, an a priori
            marginal distribution is computed.
        order -- iterable collection of nodes to be eliminated defining the 
            order in which they are eliminated. If not specified, the variables
            are eliminated in the order they are listed in the network.
        
        """
        # trivial order: every non-query node in network order
        if order is None:
            order = [node for node in self.nodes if node not in variables]
        
        # if evidence is given, the factors must be reduced first
        if evidence is None:
            # list() so factors can be mutated below without touching the
            # dictionary itself
            factors = list(self.probabilities.values())
        else:
            factors = deepcopy(list(self.probabilities.values()))
            
            for factor in factors:
                # find the evidence variables in the current factor
                eVars = list(set(evidence.keys()) & set(factor.nodes))
                if len(eVars) != 0:
                    newPT = []
                    indices = dict([(node, factor.nodes.index(node))
                                    for node in eVars])
                    # keep only the lines that match the evidence
                    for line in factor.probabilityTable:
                        match = True
                        for var in eVars:
                            if line[indices[var]] != evidence[var]:
                                match = False
                                break
                        if match:
                            newPT.append(line)
                    factor.probabilityTable = newPT
                    
        for var in order:
            # multiply all factors mentioning var, then sum var out of the
            # product and replace those factors with the result
            relevantFactors = [factor for factor in factors
                               if var in factor.nodes]
            newFactor = multiplyPTs(relevantFactors)
            newFactor = sumOutVar(var, newFactor)
            
            for factor in relevantFactors:
                factors.remove(factor)
            
            factors.append(newFactor)
            
        finalFactor = multiplyPTs(factors)
        
        # if evidence was given the final factor must be normalized
        if evidence is not None:
            finalFactor.normalize()
            
        return finalFactor
    
    def _dfsvisit(self, node, visited, order):
        """Visit unvisited nodes in depth-first fashion and append
        them to a topological ordering of nodes.

        node -- string representing the first node to visit
        visited -- set representing already visited nodes
        order -- list representing the current topological ordering of the nodes
        
        Based on code from http://www-users.cs.york.ac.uk/~jc/teaching/agm/gPy/
        
        """
        # all parents must precede the node in the ordering
        for parent in self.parents(node):
            if parent not in visited:
                self._dfsvisit(parent, visited, order)
        visited.add(node)
        order.append(node)
    
    def _topological_order(self):
        """Return a topological ordering of the nodes (children come after their
        parents in the ordering).
        Return a list representing the topological ordering of the nodes.
        
        Based on code from http://www-users.cs.york.ac.uk/~jc/teaching/agm/gPy/
        
        """
        visited = set()
        order = []
        for node in self.nodes:
            if node not in visited:
                self._dfsvisit(node, visited, order)
        return order
    
    def _info_for_sampling(self):
        """Create auxiliary data structures for generating a sample from the BN.
        Return a triple (order, parent_indices, cpts), where:
            order -- list of nodes sorted in a topological order according to
                the BN
            parent_indices -- list of lists where each list contains the indices
                of the parents (w.r.t. the order list) of the corresponding node
                in the order list
            cpts -- list of probability tables of the corresponding nodes in the
                order list
        
        """
        order = self._topological_order()
        nodes_to_indices = {}
        for i, node in enumerate(order):
            nodes_to_indices[node] = i
        parent_indices = []
        for i, node in enumerate(order):
            parent_indices.append([nodes_to_indices[parent] for parent in
                                   self.parents(node)])
        cpts = [self.probabilities[node] for node in order]
        return order, parent_indices, cpts
    
    def forward_sample(self, iterations=1000):
        """Return a forward sample generator of the BN with no instantiated
        variables. Each iteration will generate an Example from the full joint
        distribution defined by the BN.
        
        Based on code from http://www-users.cs.york.ac.uk/~jc/teaching/agm/gPy/
        
        Keyword arguments:
        iterations -- number of samples to generate
        
        """
        order, parent_indices, cpts = self._info_for_sampling()
        
        n = len(order)
        indices = range(n)
        t = 0
        while t < iterations:
            inst = [None] * n
            # sample each node conditional on its (already sampled) parents;
            # the topological order guarantees parents come first
            for i in indices:
                parent_inst = dict((order[j], inst[j])
                                   for j in parent_indices[i])
                inst[i] = cpts[i].sample(parent_inst)
            yield Example(inst)
            t += 1
    
    def likelihood_weighting(self, evidence, iterations=1000):
        """Return a likelihood weighting generator of the BN with the given
        evidence. Each iteration will generate an Example from the full joint
        distribution defined by the BN and its weight.
        
        Based on code from http://www-users.cs.york.ac.uk/~jc/teaching/agm/gPy/
        
        Keyword arguments:
        evidence -- dictionary containing instantiated nodes of the form:
            {"node": node_value, ...}
        iterations -- number of samples to generate
        
        """ 
        order, parent_indices, cpts = self._info_for_sampling()
        
        # prebuild an instantiation template with evidence values fixed and
        # the remaining positions left to be sampled
        indices = []
        uninstantiated = set() 
        inst = []
        for i, node in enumerate(order): 
            indices.append(i)
            if node in evidence: 
                inst.append(evidence[node]) 
            else:
                uninstantiated.add(i) 
                inst.append(None)
        
        t = 0 
        while t < iterations:
            inst = copy(inst)
            weight = 1
            for i in indices:
                # create a dictionary with instantiations of the parents
                node_inst = dict((order[j], inst[j])
                                 for j in parent_indices[i])
                if i in uninstantiated:
                    inst[i] = cpts[i].sample(node_inst) 
                else:
                    # evidence node: weight the sample by the likelihood of
                    # the evidence given the sampled parents
                    node_inst[order[i]] = inst[i]
                    weight *= cpts[i].get_probability(node_inst)
            yield Example(inst, weight)
            t += 1
            
class WrongNumberOfVariables(ValueError):
    """Raised when an example does not have one value per variable of the
    table it is being added to."""

class WeightedExampleError(ValueError):
    """Raised when a weighted example is used where only unweighted examples
    are supported."""

class Example():
    
    """Container pairing a list of example values with a sampling weight."""
    
    def __init__(self, values, weight=1.0):
        """Initialize the Example class.
        
        Keyword arguments:
        values -- list of booleans representing the values of the corresponding
            variables
        weight -- float representing the weight of the example
        
        """
        self.values = values
        self.weight = weight
    
    def __getitem__(self, i):
        """Return the value stored at position i."""
        return self.values[i]
    
    def __iter__(self):
        """Iterate over the stored values."""
        return iter(self.values)
        
    def __len__(self):
        """Return the number of stored values."""
        return len(self.values)
    
    def __str__(self):
        """Return a human-readable rendering of the values and the weight."""
        return "(vals:{0}, weight:{1})".format(self.values, self.weight)

class ExampleTable():
    
    """Class for storing and computing with unweighted examples."""
    
    def __init__(self, variables):
        """Initialize the ExampleTable class.
        
        Keyword arguments:
        variables -- list of strings representing variable names
        
        """
        self.variables = variables
        self.data = []
        
        # map each variable to its column index in self.variables
        self.vars_to_indices = dict((var, i) for i, var in
                                    enumerate(self.variables))
    
    def __len__(self):
        """Return the number of stored examples."""
        return len(self.data)
    
    def append(self, example):
        """Append the given example to the ExampleTable.
        
        Keyword arguments:
        example -- Example representing the example to be added to the
            ExampleTable
        
        Raises:
        WrongNumberOfVariables -- if the example length does not match the
            number of variables.
        WeightedExampleError -- if the example has a weight other than 1.
        
        """
        if len(example) != len(self.variables):
            raise WrongNumberOfVariables("Example {0} should have {1} values " \
                "instead of {2})".format(example, len(self.variables),
                len(example)))
        if example.weight != 1:
            raise WeightedExampleError("All examples should have weight equal" \
                " to 1. Or use WeightedExampleTable instead.")
        self.data.append(example)

    def extend(self, examples):
        """Extend the ExampleTable with the given list of examples.
        
        Keyword arguments:
        examples -- list of Examples to be added to the ExampleTable
        
        """
        for example in examples:
            self.append(example)
    
    def to_pebl_file(self, filename):
        """Save the ExampleTable in the PEBL file format.
        
        Note: Currently it only supports saving ExampleTable where all examples
        have weight equal to 1.
        
        Keyword arguments:
        filename -- string representing the path where to save the ExampleTable
        
        """
        with open(filename, 'w') as f:
            f.write("# PEBL tab-delimited data file\n")
            variables = "\t".join(["{0},class(true, false)".format(v) for v in
                                   self.variables])
            f.write(variables+"\n")
            for ex in self.data:
                ex_str = "\t".join(["true" if value else "false" for value in
                                    ex])
                f.write(ex_str+"\n")
    
    def get_cond_prob(self, variable_values, evidence_values=None):
        """Return the conditional probability of variables with values from
        variable_values given the evidence from evidence_values approximated
        with examples from the ExampleTable.
        
        Note: If evidence_values is empty or not given, the method returns the
            marginal probability of variable_values relative to the joint
            distribution approximated with examples from the ExampleTable.
        
        Keyword arguments:
        variable_values -- dictionary containing instantiated nodes of the form:
            {"node": node_value, ...}
        evidence_values -- dictionary containing instantiated nodes of the form:
            {"node": node_value, ...}
        
        """
        # avoid a mutable default argument; None means "no evidence"
        if evidence_values is None:
            evidence_values = {}
        # number of examples that match the variable_values and evidence_values
        n_vars = 0
        # number of examples that match the evidence_values
        n_evidence = 0
        
        for ex in self.data:
            evidence = False
            for var in evidence_values:
                if ex[self.vars_to_indices[var]] != evidence_values[var]:
                    break
            else:
                # we found an example that matches evidence_values
                evidence = True
                n_evidence += 1
            if evidence:
                # only check this example if it already matched the
                # evidence_values 
                for var in variable_values:
                    if ex[self.vars_to_indices[var]] != variable_values[var]:
                        break
                else:
                    # we found an example that matches variable_values
                    n_vars += 1
        # NOTE(review): raises ZeroDivisionError if no example matches the
        # evidence — kept as in the original
        return 1.*n_vars/n_evidence

class WeightedExampleTable(ExampleTable):
    
    """Class for storing and computing with weighted examples."""
    
    def __init__(self, evidence, *args, **kwargs):
        """Initialize the WeightedExampleTable class.
        
        Keyword arguments:
        evidence -- dictionary containing instantiated nodes of the form:
            {"node": node_value, ...}
        
        Remaining arguments are forwarded to ExampleTable.__init__.
        
        """
        ExampleTable.__init__(self, *args, **kwargs)
        
        self.evidence = evidence
        # running total of the appended example weights, used as the
        # normalization constant in get_cond_prob
        self.sum_of_weights = 0
    
    def append(self, example):
        """Append the given example to the ExampleTable.
        
        Keyword arguments:
        example -- Example representing the example to be added to the
            ExampleTable
        
        Raises:
        WrongNumberOfVariables -- if the example length does not match the
            number of variables.
        
        """
        if len(example) != len(self.variables):
            raise WrongNumberOfVariables("Example {0} should have {1} values " \
                "instead of {2})".format(example, len(self.variables),
                len(example)))
        self.data.append(example)
        self.sum_of_weights += example.weight
    
    def to_pebl_file(self, filename):
        """Saving weighted examples in the PEBL file format is not supported;
        always raises WeightedExampleError.
        
        Keyword arguments:
        filename -- string representing the path where to save the ExampleTable
        
        """
        raise WeightedExampleError("Currently, only unweighted examples can be" \
            " exported to the PEBL file format.")
    
    def get_cond_prob(self, variable_values):
        """Return the conditional probability of variables with values from
        variable_values given the evidence from self.evidence approximated
        with examples from the ExampleTable.
        
        Note: The examples in the ExampleTable were generated in such a way that
        all of them have the right value of evidence variables.
        
        Keyword arguments:
        variable_values -- dictionary containing instantiated nodes of the form:
            {"node": node_value, ...}
        
        """
        # weighted sum of examples that match the variable_values
        sum_vars = 0
        for ex in self.data: 
            for var in variable_values:
                if ex[self.vars_to_indices[var]] != variable_values[var]:
                    break
            else:
                # we found an example that matches variable_values
                sum_vars += ex.weight
        return 1.*sum_vars/self.sum_of_weights

if __name__=='__main__':
    def getExampleNetwork1():
        """Build the classic Winter/Sprinkler/Rain/Wet grass/Slippery road
        example network."""
        nodes = ['Winter', 'Sprinkler', 'Rain', 'Wet_grass', 'Slippery_road']
        pts = {}
        
        pts['Winter'] = PT(['Winter'], [[True,   0.6],
                                        [False,  0.4]])
        
        pts['Sprinkler'] = PT(['Winter','Sprinkler'], [[True,  True,   0.2],
                                                       [True,  False,  0.8],
                                                       [False, True,   0.75],
                                                       [False, False,  0.25]])
        
        pts['Rain'] = PT(['Winter','Rain'], [[True,  True,   0.8],
                                             [True,  False,  0.2],
                                             [False, True,   0.1],
                                             [False, False,  0.9]])
        
        pts['Wet_grass'] = PT(['Sprinkler','Rain','Wet_grass'], 
                              [[True,  True,  True,   0.95],
                               [True,  True,  False,  0.05],
                               [True,  False, True,   0.9],
                               [True,  False, False,  0.1],
                               [False, True,  True,   0.8],
                               [False, True,  False,  0.2],
                               [False, False, True,   0.0],
                               [False, False, False,  1.0]])
        
        pts['Slippery_road'] = PT(['Rain','Slippery_road'],
                                  [[True,  True,   0.7],
                                   [True,  False,  0.3],
                                   [False, True,   0.0],
                                   [False, False,  1.0]])
        return BN(nodes, pts)
    
    def getExampleNetwork2():
        """Build a small three-node chain network A -> B -> C."""
        nodes = ['A', 'B', 'C']
        ptA = PT(['A'], [[True,   0.6],
                         [False,  0.4]])
        ptB = PT(['A', 'B'], [[True,  True,   0.9],
                              [True,  False,  0.1],
                              [False, True,   0.2],
                              [False, False,  0.8]])
        ptC = PT(['B', 'C'], [[True,  True,   0.3],
                              [True,  False,  0.7],
                              [False, True,   0.5],
                              [False, False,  0.5]])
        
        return BN(nodes, {'A':ptA, 'B':ptB, 'C':ptC})
        
    exampleNetwork1 = getExampleNetwork1()
    exampleNetwork2 = getExampleNetwork2()
    
    # infer P(C | B=True) in the small chain network
    print(exampleNetwork2.variableElimination(['C'], evidence={'B': True}))