import sys
import numpy as np
import csv
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as Axes3D

# Large sentinel value: stands in for empty fields when loading the Marxan
# run summary (see extract_SA_run_summary); the run-summary encoding notes
# further below also use this magnitude to flag essential parcels.
ESSENTIAL = 1234567890.

def load_test_data(parcel_uri, features_uri, parcel_vs_features_uri):
    """Load the experiment CSV data: pu, spec, and puvspr files.

        Inputs:
            - parcel_uri: the parcel data, formatted as 'pu.dat'
            - features_uri: conservation feature data, formatted as 'spec.dat'
            - parcel_vs_features_uri: per-parcel environment features,
                formatted as 'puvspr.dat'

        Outputs:
            - a tuple: (conservation feature matrix, parcel cost vector)
                - conservation feature matrix M:
                    a PxF matrix with:
                        - P = number of parcels,
                        - F = number of conservation features
                    where the element M[p,f] is the amount of conservation
                    feature 'f' present on parcel 'p'.
                - parcel cost vector C: a Px1 vector where element 'p' is the
                  investment cost required to conserve every feature present on
                  parcel 'p'.
    """
    # Build a feature_id -> (feature_name, feature_order) dictionary, e.g.:
    #   features_dic[700] = ('Karstic_Streams', 0)
    #   features_dic[710] = ('Medium_River_non_Karstic', 1)
    # Feature ids in the file can be any numbers and are not guaranteed to be
    # ordered or contiguous, so 'features_order' assigns a 0-based contiguous
    # column index in file-read order.
    features_dic = {}
    features_order = 0
    with open(features_uri) as features_data:
        features_reader = csv.reader(features_data)
        next(features_reader)  # skip the header row
        for row in features_reader:
            features_dic[row[0]] = (row[4], features_order)
            features_order += 1
    # Build a parcel_id -> cost dictionary.
    pu_dic = {}
    with open(parcel_uri) as pu_data:
        pu_reader = csv.reader(pu_data)
        next(pu_reader)  # skip the header row
        for row in pu_reader:
            pu_dic[row[0]] = row[1]
    features_count = len(features_dic)
    parcel_count = len(pu_dic)
    # Index costs by (parcel_id - 1) so parcel_costs[p] always lines up with
    # row 'p' of the feature matrix, regardless of dict iteration order.
    parcel_costs = np.zeros(parcel_count)
    for parcel_id, parcel_cost in pu_dic.items():
        parcel_costs[int(parcel_id) - 1] = float(parcel_cost)

    # Fill the PxF feature matrix from the per-parcel feature file.
    feature_matrix = np.zeros((parcel_count, features_count))
    with open(parcel_vs_features_uri) as ps_data:
        ps_reader = csv.reader(ps_data)
        next(ps_reader)  # skip the header row
        for row in ps_reader:
            feature_ID = row[0]
            feature_column = features_dic[feature_ID][1]
            pu_ID = int(row[1]) - 1  # parcel ids are 1-based in the file
            feature_amount = row[2]
            feature_matrix[pu_ID, feature_column] = feature_amount

    return (feature_matrix, parcel_costs)

def extract_SA_run_summary(SA_run_summary_uri):
    """Load a Marxan run summary CSV file into a float matrix.

        Inputs:
            - SA_run_summary_uri: path to the run summary CSV file.

        Outputs:
            - an RxC float matrix, one row per CSV row; empty CSV fields are
              replaced by the ESSENTIAL sentinel, and columns missing from
              shorter rows are left at 0.
    """
    with open(SA_run_summary_uri) as SA_data:
        # First pass: count the rows and track the WIDEST row, so the matrix
        # is large enough even if row lengths are not all identical.
        row_count = 0
        row_size = 0
        SA_reader = csv.reader(SA_data)
        for row in SA_reader:
            row_size = max(row_size, len(row))
            row_count += 1

        # np.zeros already produces a float array; no astype needed.
        SA_values = np.zeros((row_count, row_size))

        # Second pass: rewind the underlying file (the csv reader just wraps
        # the file iterator, so it picks up from the start again) and fill
        # the matrix.
        SA_data.seek(0)
        row_count = 0
        for row in SA_reader:
            for column in range(len(row)):
                # Empty fields are flagged with the ESSENTIAL sentinel.
                if row[column]:
                    value = row[column]
                else:
                    value = ESSENTIAL
                SA_values[row_count, column] = value
            row_count += 1

    return SA_values

def extract_SA_portfolio(SA_run_summary):
    """Collect, for each scenario (column), the list of used parcel indices.

    A parcel counts as used in a scenario when its run-summary entry is >= 0
    (unused parcels are encoded as -1).
    """
    scenario_count = SA_run_summary.shape[1]
    return [list(np.nonzero(SA_run_summary[:, s] >= 0)[0])
            for s in range(scenario_count)]

def make_test_data(parcel_count = 10, feature_count = 5):
    """ Create a R x C matrix of conservation features where:
       - Row(i): parcel(i)
       - Col(j): feature(j)
       - element[i,j]: fraction of conservation feature j in parcel i
     So: feature amounts sum up to 1 across parcels: columns sum up to 1.

        Inputs:
            - parcel_count: number of parcels (rows).
            - feature_count: number of conservation features (columns).

        Outputs:
            - a tuple (feature_matrix, costs) where costs[i] is the cost of
              conserving parcel 'i', drawn uniformly in [10, 50).
    """
    feature_matrix = np.random.rand(parcel_count, feature_count)
    # Normalize every column so each feature sums to 1 across parcels
    # (broadcasting divides each column by its own sum).
    feature_matrix /= np.sum(feature_matrix, axis = 0)

    # Cost vector for each parcel with values between 10 and 50.
    # (A 'penalties' vector was previously computed here but never used or
    # returned, and its comment contradicted the code; dead code removed.)
    costs = np.random.rand(parcel_count) * 40 + 10
    return (feature_matrix, costs)

def make_objective(feature_matrix, proportion=0.1):
    """ Translate a feature proportion to absolute amounts wrt feature_matrix.

        Inputs:
            - feature_matrix: a PxF matrix where a row is a parcel and a
              column is the amount of a feature in this parcel.
            - proportion: minimum proportion of every feature we want in the
              parcel portfolio (default 0.1, applied uniformly).

        Outputs:
            - A Fx1 vector where element 'i' is the minimum amount of
              feature 'i' we want in the optimized parcel portfolio.

        Algorithm: total each feature across all parcels, then scale every
        total by the requested uniform proportion.
    """
    # A proportion only makes sense inside [0, 1].
    assert(proportion >= 0.)
    assert(proportion <= 1.)

    total_per_feature = np.sum(feature_matrix, axis=0)
    return total_per_feature * proportion

def meet_objective(objective, feature_matrix, parcel_cost, input_portfolio = None):
    """ Greedily build a parcel portfolio whose cumulated features meet
    'objective' while keeping the total cost low.

        Inputs:
            - objective: Fx1 vector of minimum feature amounts to conserve.
            - feature_matrix: PxF matrix where feature_matrix[p,f] is the
              amount of feature 'f' on parcel 'p'.
            - parcel_cost: Px1 vector of parcel conservation costs.
            - input_portfolio: optional list of parcel indices already in the
              portfolio; their cost is counted up-front.

        Outputs:
            - (optimized_cost, portfolio, portfolio_features), or
              (0.0, [], []) when the objective cannot be met at all.

        Algorithm:
         0 - Initialization w.r.t. initial portfolio:
           - objective_left, optimized_cost, available_features
         1 - Compute feature amount normalized by cost for each parcel i:
           - normalized_features[i] =
               sum(min(feature_matrix[i,], objective_left)) / parcel_cost[i]
         2 - Pick the parcel with the highest feature value:
           - best_parcel = argmax(normalized_feature)
         3 - Update cost and objective left:
           - optimized_cost += parcel_cost[best_parcel]
           - objective_left = max(objective_left - feature_matrix[best_parcel], 0)
         4 - Remove the parcel by zeroing its feature row so that it will
             never be selected again.
         5 - if sum(objective_left) == 0, return optimized_cost,
             otherwise goto step 1 """
    # Check if the objective can be met at all: every feature must be
    # available (with a small margin) across all parcels combined.
    feature_content = np.sum(feature_matrix, axis = 0)
    excess_features = feature_content - objective
    if np.min(excess_features) < 0.000001:
        # Keep the same 3-tuple arity as the success path so callers can
        # always unpack three values.
        return (0.0, [], [])

    # Step 0: work on float copies so the caller's arrays are never modified.
    objective_left = objective.astype(float)
    optimized_cost = 0.0
    available_features = feature_matrix.astype(float)
    # Used to wipe a parcel's row after it is picked (step 4).
    no_feature = np.zeros(feature_matrix.shape[1])
    # Copy the input portfolio (None avoids the mutable-default pitfall and
    # copying avoids aliasing the caller's list).
    portfolio = [] if input_portfolio is None else list(input_portfolio)
    portfolio_features = []
    # Parcels already in the portfolio contribute their cost up-front.
    for parcel in portfolio:
        optimized_cost += parcel_cost[parcel]

    # Optimize until the objective is met.
    while np.sum(objective_left):
        parcel_count = available_features.shape[0]
        # Step 1: for each parcel, sum only the features still useful for the
        # objective (element-wise min: surplus feature amounts are worthless),
        # then normalize by the parcel's cost.
        cost_normalized_features = np.zeros(parcel_count)
        for p in range(parcel_count):
            relevant_features = np.minimum(available_features[p, :],
                                           objective_left)
            # Offset the denominator only for zero-cost parcels to avoid a
            # division by zero.
            zero_div_offset = 1. if parcel_cost[p] == 0. else 0.
            cost_normalized_features[p] = np.sum(relevant_features) / \
                                            (parcel_cost[p] + zero_div_offset)
        # Step 2: greedily pick the best cost-normalized parcel.
        best_parcel = np.argmax(cost_normalized_features)
        portfolio.append(best_parcel)
        portfolio_features.append(cost_normalized_features[best_parcel])
        # Step 3: account for the new parcel's cost and features.
        optimized_cost += parcel_cost[best_parcel]
        objective_left -= available_features[best_parcel, :]
        objective_left[objective_left < 0] = 0  # clamp to >= 0
        # Step 4: wipe this parcel so it is never selected again.
        available_features[best_parcel, :] = no_feature

    return (optimized_cost, portfolio, portfolio_features)

def add_features(portfolio, feature_matrix):
    """Return the element-wise sum of the feature rows of every parcel
    listed in 'portfolio' (all zeros for an empty portfolio)."""
    cumulated = np.zeros_like(feature_matrix[0, :])
    for parcel_index in portfolio:
        cumulated = cumulated + feature_matrix[parcel_index, :]
    return cumulated

def normalize_feature_matrix(feature_matrix):
    """Return a copy of 'feature_matrix' with each column scaled to sum to 1.

    The input matrix is never modified. The copy is cast to float so that
    integer inputs neither fail on in-place true division (Python 3) nor
    truncate the fractions.
    """
    normalized_feature_matrix = feature_matrix.astype(float)
    # Broadcasting divides every column by its own sum, so each column of the
    # result sums to 1.
    normalized_feature_matrix /= np.sum(normalized_feature_matrix, axis=0)
    return normalized_feature_matrix

# Fake data (synthetic alternative to loading from disk):
#(feature_matrix, cost) = make_test_data(parcel_count = 156, feature_count = 15)

# Filenames:
parcel_uri = 'pu.dat'
species_uri = 'spec.dat'
parcel_vs_species_uri = 'puvspr.dat'
marxan_run_summary = 'marxan_run_summary.csv'

# Load parcel and feature data
(feature_matrix, cost) = load_test_data(parcel_uri, species_uri, parcel_vs_species_uri)
# Normalize the features
feature_matrix = normalize_feature_matrix(feature_matrix)
# Load Marxan's run data.
# (Resolved merge conflict: keep the named-constant variant; the other
# branch's DP_run_summary / pu_used initializations are recomputed below.)
SA_run_summary = extract_SA_run_summary(marxan_run_summary)
# Per-scenario lists of used parcels, and the size of each portfolio
# (a parcel is used when its run-summary entry is >= 0).
SA_portfolio = extract_SA_portfolio(SA_run_summary)
SA_portfolio_size = np.sum((SA_run_summary >= 0).astype(int), axis = 0)

#print('portfolio', SA_portfolio[0], SA_portfolio_size)
#print('cost', cost[SA_portfolio[0]], np.sum(cost[SA_portfolio[0]]))
#print('all costs:', cost)
#sys.exit()
# Compute the cost of each Marxan run across the 49 scenarios:
#   0)- SA_run_summary is the matrix from the csv file on disk where:
#       - unused parcels are encoded as -1
#       - used parcels are either 0, delta_cost, or the huge number 1234567890
#   So any number >=0 is a used parcel.
pu_used = (SA_run_summary >= 0).astype(float)   # used parcels=1, else 0.0
SA_cost = np.zeros_like(pu_used[0,:])   # Initialize the cost vector
# NOTE: 'objective' here is a scenario (column) index; it is re-bound to a
# feature-amount vector in the DP loop further below.
for objective in range(SA_cost.size):
    # Only multiply the used parcels by their cost (else 0.0)
    # Sum up the costs and store the result in the cost vector
    SA_cost[objective] = np.sum(pu_used[:,objective] * cost)

# Initialize dynamic programming data
# no_feature: an all-zero feature row (only used by the commented-out
# re-run experiment below).
no_feature = feature_matrix[0,:] * 0.0
DP_run_summary = np.ones_like(SA_run_summary) * -1.   # -1 == unused parcel
DP_cost = np.zeros_like(SA_cost)
DP_portfolio_size = np.zeros_like(SA_portfolio_size)
pct_delta_cost = []
parcel_essentiality = np.zeros_like(SA_run_summary)
# NOTE(review): range(1) runs only the first scenario -- looks like a
# debugging leftover; restore range(DP_cost.size) for the full sweep.
for scenario in range(1): #DP_cost.size):
    proportion = float(scenario + 1) / 50.  # Ranges from 0.02 to 0.98
    # Create a vector the same size as the features in 'feature_matrix'
    # and initialize every element with 'proportion'
    objective = make_objective(feature_matrix, proportion)
    # Optimize portfolio cost for the given objective
    DP_cost[scenario], DP_portfolio, DP_portfolio_features = \
        meet_objective(objective, feature_matrix, cost)
    DP_portfolio_size[scenario] = len(DP_portfolio)
    # Sum up the features in the portfolios returned by both methods (SA & DP)
    DP_added_features = add_features(DP_portfolio, feature_matrix)
    SA_added_features = add_features(SA_portfolio[scenario], feature_matrix)
    # Print out information on the screen:
    #   For both Marxan (SA) and Dynamic Programming (DP):
    #       - closest feature to objective (1 is a perfect match w/objective)
    #       - Average proportion of features conserved beyond the objective 
    #           (average of the above)
    print('scenario: %.2f' % (float(scenario+1)/50.), \
        'SA minimum: %.3f' % np.min(SA_added_features / objective), \
        '(%.3f)' % np.sum(SA_added_features / objective / objective.size), \
        'DP: %.3f' % np.min(DP_added_features / objective), \
        '(%.3f)' % np.sum(DP_added_features / objective / objective.size), \
        len(DP_portfolio))
    for parcel in range(len(DP_portfolio)):
        print(DP_portfolio[parcel], DP_portfolio_features[parcel])

# Disabled re-run experiment: re-optimize with each chosen parcel removed to
# measure how essential it is (delta_cost; -ESSENTIAL when the objective
# becomes infeasible without it).
#    for parcel in DP_portfolio:
#        new_matrix = np.zeros(feature_matrix.shape)
#        new_matrix[:,:] = feature_matrix
#        new_matrix[parcel,:] = no_feature   # remove this parcel
#        new_cost, new_scenario = optimize_objective(objective, new_matrix, cost)
#        if new_scenario:
#            delta_cost = new_cost - DP_cost[scenario]
#        else:
#            delta_cost = -ESSENTIAL
#        parcel_essentiality[parcel, scenario] = delta_cost
#        DP_run_summary[parcel, scenario] = delta_cost
#        #if delta_cost < 0.0:
#        loss = int(delta_cost/DP_cost[scenario]*10000.0)/100.
#        pct_delta_cost.append(loss)
#        print('parcel', parcel, \
#            #'d_feature', delta_feature \
#            'd_cost', delta_cost, \
#            'loss', loss)

# Save marxan run summary to CSV file:
# NOTE(review): this overwrites 'marxan_run_summary.csv', the same file the
# summary was loaded from above -- confirm that round-tripping is intended.
#print('SA:', SA_portfolio_size)
#print(SA_cost)
with open('marxan_run_summary.csv', 'w') as csv_file:
    writer = csv.writer(csv_file)
    for row in range(SA_run_summary.shape[0]):
        writer.writerow(SA_run_summary[row,])

# Save dynamic programming run to CSV file:
#print('DP:', DP_portfolio_size)
#print(DP_cost)
with open('DP_run_summary.csv', 'w') as csv_file:
    writer = csv.writer(csv_file)
    for row in range(DP_run_summary.shape[0]):
        writer.writerow(DP_run_summary[row,])

# Save SA-DP comparison data to CSV file (one row per objective scenario:
# portfolio sizes and costs for both methods):
with open('Marxan_DP_comparison.csv', 'w') as csv_file:
    writer = csv.writer(csv_file)
    writer.writerow(['portfolio size (Marxan)', 'cost (Marxan)', \
        'cost (dynamic prog.)', 'portfolio size (dynamic prog.)'])
    for row in range(len(SA_portfolio_size)):
        writer.writerow([SA_portfolio_size[row], SA_cost[row], \
            DP_cost[row], DP_portfolio_size[row]])
# Figures:
# Top subplot: portfolio sizes as paired bars (one pair per objective level).
width = .35 / 50.
ind = (np.arange(len(SA_portfolio_size)) + .5) / 50.
fig = plt.figure()
ax = fig.add_subplot(211)

SA_bars = ax.bar(ind, SA_portfolio_size, width, color = 'r')
DP_bars = ax.bar(ind+width, DP_portfolio_size, width, color = 'y')

plt.title('Marxan vs. Dynamic Programming on Cost Optimization')
plt.ylabel('number of parcels in a portfolio')
ax.grid(True)

# Pass the legend location as a keyword: positional 'loc' after the labels
# is rejected by modern matplotlib Axes.legend.
ax.legend((SA_bars[0], DP_bars[0]), \
    ('portfolio size (SA)', 'portfolio size (DP)'), loc='upper center',
    shadow=True)

# Bottom subplot: portfolio costs as step plots over the same objectives.
ax = fig.add_subplot(212)

SA_line = ax.step(ind, SA_cost, 'r')
DP_line = ax.step(ind, DP_cost, 'y')

plt.xlabel('objective (fraction of features included)')
plt.ylabel('cost of a portfolio')
ax.grid(True)
ax.legend((SA_line[0], DP_line[0]), \
    ('portfolio cost (SA)', 'portfolio cost (DP)'), loc='upper center',
    shadow=True)

plt.show()
