#! /usr/bin/env python
# Nominal (max, min) value range for each named feature.
# scale_data uses these pairs to map raw feature values onto [0, 1].
scale_dict = {
    'average_power': (200, 0),
    'beta': (0.1, 0),
    'per': (0.3, 0),
    'average_delta': (200, 0),
    'mean': (1, -3),
    'rms': (20, 0),
    'variance': (200, 0),
    'arpha_time': (200, 0),
    'beta_time': (500, 0),
    'skewness': (0.2, -0.7),
    'kurtosis': (2, 0),
    'average_alpha': (200, 0),
    'delta_time': (100, 0),
    'theta_time': (140, 0),
    'delta_alpha': (15, 0),
    'delta_beta': (50, 0),
    'delta_theta': (4, 0),
    'theta_alpha': (10, 0),
    'theta_beta': (3, 0),
    'alpha_beta': (10, 0),
    'deltatheta_alphabeta': (0.5, 0),
    'total_data': (270000, 0),
    'aver_alpha': (150, 0),
    'aver_beta': (30, 0),
    'aver_theta': (300, 0),
    'aver_delta': (500, 0),
    'entropy_power': (0.5, 0),
    'local_min_max_alpha': (50, 0),
    'local_min_max_beta': (70, 0),
    'local_min_max_theta': (180, 0),
    'local_min_max_delta': (220, 0),
    'entropy_alpha': (0.8, 0),
    'entropy_beta': (0.8, 0),
    'entropy_theta': (0.8, 0),
    'entropy_delta': (0.8, 0),
    'max_alpha': (15, 0),
    'max_beta': (10, 0),
    'max_theta': (60, 0),
    'max_delta': (450, 0),
}
             
# Ordered scale_dict keys, one per feature column in the input files.
# NOTE(review): columns 1-4 all share the 'per' scale — presumably four
# per-band percentages normalized identically; confirm against the
# feature-extraction step.
_FEATURE_ORDER = [
    'average_power', 'per', 'per', 'per', 'per',
    'aver_alpha', 'aver_beta', 'aver_theta', 'aver_delta',
    'delta_alpha', 'delta_beta', 'delta_theta',
    'theta_alpha', 'theta_beta', 'alpha_beta',
    'deltatheta_alphabeta',
    'mean', 'rms', 'variance', 'skewness', 'kurtosis',
    'arpha_time', 'beta_time', 'delta_time', 'theta_time',
    'entropy_power',
    'local_min_max_alpha', 'local_min_max_beta',
    'local_min_max_theta', 'local_min_max_delta',
    'entropy_alpha', 'entropy_beta', 'entropy_theta', 'entropy_delta',
    'max_alpha', 'max_beta', 'max_theta', 'max_delta',
]

# Maps each feature column index (0-37) to its scale_dict key.
inter_string_dict = dict(enumerate(_FEATURE_ORDER))
def scale_data(data_list, scale_tuple):
    """Linearly rescale every value in data_list into [MIN_SCALE, MAX_SCALE].

    Each value is mapped from the feature's nominal range
    [scale_tuple[1], scale_tuple[0]] (min, max) onto [0, 1].  No clamping
    is applied: inputs outside the nominal range yield results outside
    [0, 1], matching the original behavior.

    Args:
        data_list: non-empty sequence of numeric feature values.
        scale_tuple: (max_data, min_data) nominal range for this feature.

    Returns:
        list of rescaled float values, same order as data_list.

    Raises:
        ZeroDivisionError: if max_data == min_data or data_list is empty.
    """
    MAX_SCALE = 1
    MIN_SCALE = 0
    max_data, min_data = scale_tuple

    # Diagnostic dump: observed range vs. the nominal range being applied.
    print("================================")
    print(max(data_list))
    print(min(data_list))
    print(max_data)
    print(min_data)
    print(sum(data_list) / len(data_list))
    print("\n")

    span = MAX_SCALE - MIN_SCALE
    return [(data - min_data) / (max_data - min_data) * span + MIN_SCALE
            for data in data_list]

def scale_file(srcfilename, desfilename):
    """Rescale every feature column of srcfilename and write desfilename.

    Reads whitespace-separated numeric rows (one epoch per line), rescales
    each column with scale_data using the (max, min) range looked up via
    inter_string_dict/scale_dict, then writes the rows back out with columns
    joined by two spaces and rows separated by single newlines (no trailing
    newline).  If the source file is empty, no output file is created.

    Args:
        srcfilename: path to the raw feature file.
        desfilename: path of the scaled output file to create.
    """
    with open(srcfilename, 'r') as f_src:
        x_features_list = [line.strip().split() for line in f_src]

    if not x_features_list:
        return

    # Assumes every row has the same column count as the first row —
    # TODO(review): confirm the feature files are rectangular.
    feature_length = len(x_features_list[0])

    # Scale column by column: scale_features_list[i] is column i, scaled.
    scale_features_list = []
    for feature_index in range(feature_length):
        print(feature_index)
        features_data = [float(features[feature_index])
                         for features in x_features_list]
        scale_features = scale_data(
            features_data, scale_dict[inter_string_dict[feature_index]])
        scale_features_list.append(scale_features)

    # Transpose back to row-major (one epoch per line).
    epocs_length = len(scale_features_list[0])
    scale_epocs_list = []
    for epocs_index in range(epocs_length):
        scale_epocs = [str(column[epocs_index])
                       for column in scale_features_list]
        scale_epocs_list.append(scale_epocs)

    # Bug fix: the original compared a row (a list) against an int, which is
    # always True, so a trailing newline was always emitted; it also never
    # closed the output file.  Join rows explicitly instead.
    with open(desfilename, 'w') as f_des:
        f_des.write('\n'.join("  ".join(epocs) for epocs in scale_epocs_list))

def caculate_average(srcfilename):
    """Print the mean of each feature column in srcfilename.

    Reads whitespace-separated numeric rows and prints one mean per line,
    in column order.  Prints nothing for an empty file.

    NOTE: the misspelled name ("caculate") is kept for caller compatibility.

    Args:
        srcfilename: path to a whitespace-separated numeric feature file.
    """
    with open(srcfilename, 'r') as f_src:
        x_features_list = [line.strip().split() for line in f_src]

    if not x_features_list:
        return

    # Assumes rectangular data: every row as wide as the first.
    feature_length = len(x_features_list[0])
    for feature_index in range(feature_length):
        features_data = [float(features[feature_index])
                         for features in x_features_list]
        print(sum(features_data) / len(features_data))
# Script entry point: scale each night's feature file into its "scale_*"
# counterpart.  Guarded so importing this module (e.g. to reuse scale_data)
# no longer triggers the file processing as a side effect; behavior when
# run directly is unchanged.
if __name__ == '__main__':
    print(scale_dict)
    scale_file('r.txt.38.20140123', 'scale_r')
    scale_file('wake.txt.38.20140123', 'scale_wake')
    scale_file('n1.txt.38.20140123', 'scale_n1')
    scale_file('n2.txt.38.20140123', 'scale_n2')
    scale_file('n3.txt.38.20140123', 'scale_n3')

    # scale_file('exp.txt', 'scale_exp')
    # caculate_average('exp20140123.txt')