import os.path
import tensorflow as tf
import numpy as np
import sys
from method1 import train
from Evaluate import Evaluate
from Translate import Translate
from Routes import Route, Routes
from pre_processing import readRoutes
from sklearn import svm
from sklearn.ensemble import RandomForestRegressor
import cPickle as pickle
from sklearn import linear_model
from Evaluate import Evaluate
from Esembler import Esembler
from sklearn.ensemble import GradientBoostingRegressor

def load_data(path_name, mode):
    """Load a saved .npz archive and return a (features, labels) pair.

    For mode 'train' or 'eval' the archive's 'datas' and 'labels' arrays
    are returned; any other mode is treated as test data, returning the
    'tests' array with labels of None.
    """
    archive = np.load(path_name)
    if mode in ('train', 'eval'):
        return archive['datas'], archive['labels']
    return archive['tests'], None

def feedTasks(task, dataset, method='nn', part='part0'):
    """Fit/evaluate one regressor per (link, time-slot) and sum per-link
    predictions into per-route travel times.

    The original definition took no parameters but read ``task``,
    ``method``, ``part`` and ``dataset`` (NameError) and recursed with
    four arguments (TypeError); the signature is restored from the
    legacy call sites at the bottom of this file.

    Parameters
    ----------
    task : str
        'train', 'eval' or 'test'.
    dataset : list of str
        One or two dataset names; with two, their samples are concatenated.
    method : str
        'nn' (TensorFlow), 'rf', 'svm' or 'lr'.
    part : str
        'part0' or 'part1'; for 'part0' the function recurses into
        'part1' and concatenates both results along the last axis.

    Returns
    -------
    (ret, ground_truth2) for 'test'/'eval'; (None, None) for 'train'.
    """
    ans = {}
    reg = {}
    ground_truth = {}
    if task == 'test' or task == 'eval':
        # Reload regressors fitted during the 'train' run.
        # NOTE(review): pickle.load on a local model file; never point
        # this at untrusted data.
        with open('./problem1/' + method + part + '.txt', 'r') as freg:
            reg = pickle.load(freg)
    for i in range(100, 124):  # one model per link id 100..123
        data, labels = load_data('./data_links/' + dataset[0] + 'data' + str(i) + '_' + part + '_modified.npz', dataset[0])
        if len(dataset) > 1:
            data2, labels2 = load_data('./data_links/' + dataset[1] + 'data' + str(i) + '_' + part + '_modified.npz', dataset[1])
            data = np.concatenate((data, data2))
            labels = np.concatenate((labels, labels2))
        tianshu = data.shape[0]   # sample count ("tianshu" = number of days)
        feature_num = data.shape[1]
        print('Dealing with model %d' % i)
        if method == 'nn':
            # Fresh graph per link so TF variable names do not collide.
            g = tf.Graph()
            with g.as_default():
                ans[i] = train(data, labels, './problem1/log/' + str(i) + '_' + part, './problem1/model/' + str(i) + '_' + part, task, display_step=10, FEATURE_NUM=feature_num)
        elif task == 'train':
            for j in range(labels.shape[1]):  # one regressor per output slot
                if method == 'rf':
                    reg[(i, j)] = RandomForestRegressor()
                elif method == 'svm':
                    reg[(i, j)] = svm.SVR()
                elif method == 'lr':
                    reg[(i, j)] = linear_model.LinearRegression()
                else:
                    print('No this method')
                    continue  # previously fell through to a KeyError on fit
                reg[(i, j)].fit(data, labels[:, j])
        elif task == 'test' or task == 'eval':
            predicted_vec = []
            for j in range(6):
                predicted_vec.append(reg[(i, j)].predict(data))
            # (6, tianshu) -> (tianshu, 6)
            ans[i] = np.array(predicted_vec).swapaxes(0, 1)
            ground_truth[i] = labels
    if task == 'train':
        with open('./problem1/' + method + part + '.txt', 'w') as freg:
            pickle.dump(reg, freg)
    if part == 'part0':
        # Second time window; merged onto the last axis below.
        part1_ret, part1_ground_truth2 = feedTasks(task, dataset, method, 'part1')
    if task == 'test' or task == 'eval':
        ret = np.zeros((6, tianshu, 6), dtype=float)
        ground_truth2 = np.zeros((6, tianshu, 6), dtype=float)
        routes = Routes()
        readRoutes(routes)
        intersection_id = ['A', 'A', 'B', 'B', 'C', 'C']
        tollgate_id = [2, 3, 1, 3, 1, 3]
        # A route's travel time is the sum of its links' predicted times.
        for i in range(6):
            for k in routes.routes:
                if k.tollgate_id == tollgate_id[i] and k.intersection_id == intersection_id[i]:
                    for u in k.link_seq:
                        ret[i] = ret[i] + ans[u]
                        if task == 'eval':
                            ground_truth2[i] = ground_truth2[i] + ground_truth[u]
        if part == 'part0':
            ret = np.concatenate((ret, part1_ret), axis=2)
            ground_truth2 = np.concatenate((ground_truth2, part1_ground_truth2), axis=2)
        return ret, ground_truth2
    return None, None

class Task1Whole:
    def __init__(self):
        pass
    def load_data(self, train_dataset = ['train'], test_dataset = ['test'], eval_dataset = ['eval']):
        suffix = '_pca'
        self.train_data = []
        self.train_label = []
        self.eval_data = []
        self.eval_label = []
        self.test_data = []
        for part in range(2):
            raw = np.load('./data_task1/traindata_part%d%s.npz' % (part, suffix))        
            if('train' in train_dataset and 'eval' in train_dataset):
                self.train_data.append(raw['datas'])
                self.train_label.append(raw['labels'])
            elif('train' in train_dataset):
                self.train_data.append(raw['datas'][:-7])
                self.train_label.append(raw['labels'][:,:-7])
            elif('eval' in train_dataset):
                self.train_data.append(raw['datas'][-7:])
                self.train_label.append(raw['labels'][:,-7:])

            if('train' in eval_dataset and 'eval' in eval_dataset):
                self.eval_data.append(raw['datas'])
                self.eval_label.append(raw['labels'])
            elif('train' in eval_dataset):
                self.eval_data.append(raw['datas'][:-7])
                self.eval_label.append(raw['labels'][:,:-7])
            elif('eval' in eval_dataset):
                self.eval_data.append(raw['datas'][-7:])
                self.eval_label.append(raw['labels'][:,-7:])        

            if('train' in test_dataset and 'eval' in test_dataset):
                self.test_data.append(raw['datas'])
            elif('train' in test_dataset):
                self.test_data.append(raw['datas'][:-7])
            elif('eval' in test_dataset):
                self.test_data.append(raw['datas'][-7:])

            if(self.eval_data[-1].shape[0] > 26):
                self.eval_data[-1] = self.eval_data[-1][-26:]
                self.eval_label[-1] = self.eval_label[-1][:,-26:]
            # if test, overlap all
            raw2 = np.load('./data_task1/testdata_part%d%s.npz' % (part, suffix))
            if( 'test' in test_dataset):
                if(len(self.test_data) <= part):
                    self.test_data.append(raw2['tests'])
                else:
                    self.test_data[part] = raw2['tests']

    def train(self, methods = []):
        assert(len(self.train_data) > 0) 
        reg = {}    
        for m in methods:   
            for part in range(2):
                for i in range(self.train_label[part].shape[0]):  # path
                    for j in range(self.train_label[part].shape[2]): # time seg
                        if m == 'svm':
                            reg[(m,part,i,j)] = svm.SVR()
                        elif m == 'rf':
                            reg[(m,part,i,j)] = RandomForestRegressor(n_estimators=100,random_state=0)
                        elif m == 'lr':
                            reg[(m,part,i,j)] = linear_model.LinearRegression()
                        elif m == 'gb':
                            reg[(m,part,i,j)] = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,max_depth=1, random_state=0, loss='ls')
                        elif m == 'huber':
                            reg[(m,part,i,j)] = linear_model.HuberRegressor(epsilon=1.35,max_iter=100,alpha=0.0001)
                        else:
                            print 'No this method'
                        reg[(m,part,i,j)].fit(self.train_data[part], self.train_label[part][i,:,j])
                        print '%s score: ' % m, reg[(m,part,i,j)].score(self.train_data[part], self.train_label[part][i,:,j])
                        if m == 'lr':
                            print reg[(m,part,i,j)].coef_
        freg =  file('./problem1/models.txt', 'w')
        pickle.dump(reg, freg)

    def test(self, methods = [], real_test_data = None):
        if(real_test_data is None):
            real_test_data = self.test_data
        assert(len(self.train_data) > 0)     
        assert(len(real_test_data) > 0)
        freg =  file('./problem1/models.txt', 'r')
        reg = pickle.load(freg)
        predicted_vec = []
        for m in methods:   
            for part in range(2):
                for i in range(self.train_label[part].shape[0]):  # path
                    for j in range(self.train_label[part].shape[2]): # time seg
                        predicted_vec.append(reg[(m,part,i,j)].predict(real_test_data[part]))
        ans = np.array(predicted_vec).reshape(len(methods), 2, self.train_label[part].shape[0], self.train_label[part].shape[2], real_test_data[part].shape[0])
        ans = ans.swapaxes(1,4).swapaxes(1,2)
        ans = np.concatenate((ans[:,:,:,:,0], ans[:,:,:,:,1]), axis=3)
        return ans
    def eval(self, methods = ['svm']):
        ans = self.test(methods = methods, real_test_data = self.eval_data)
        evaluate = Evaluate()
        ground_truth = np.concatenate((self.eval_label[0], self.eval_label[1]), axis=2)
        for i in range(len(methods)):
            # for j in range(ground_truth.shape[0]):
            #     MAPE = evaluate.evaluate_test(np.array([ans[i,j]]), np.array([ground_truth[j]]))
            #     print '%s %d MAPE=' % (methods[i],j) + str(MAPE)
            MAPE = evaluate.evaluate_test(ans[i], ground_truth)
            print '%s MAPE=' % methods[i] + str(MAPE)

if __name__ == '__main__':
    model_names = ['rf', 'svm', 'lr', 'gb', 'huber']
    weights = [10,20,15,6,10]
    a = Task1Whole()
    a.load_data(eval_dataset=['eval'])
    if sys.argv[1] == 'train':
        a.train(methods = model_names)
        a.eval(methods = model_names)
    if sys.argv[1] == 'test':
        # models0 = a.test(methods = model_names, real_test_data = a.train_data)
        # g0 = np.concatenate((a.train_label[0], a.train_label[1]), axis=2)
        models1 = a.test(methods = model_names, real_test_data = a.test_data)
        g1 =  np.concatenate((a.eval_label[0], a.eval_label[1]), axis=2)
        ans = np.zeros(models1[0].shape)
        for i in range(models1.shape[0]):
            ans = ans + models1[i] * weights[i]
        ans = ans / (1. * sum(weights))
        # e = Esembler()
        # e.createLinearEsemble(models0, g0)
        # ans = e.esemble(models1)
        evaluate = Evaluate()
        MAPE = evaluate.evaluate_test(ans, g1)
        print '%s MAPE=' % 'esemble' + str(MAPE)
        tr = Translate()
        tr.translate(ans, task = 1)
        tr.translate(g1, path='./ground_truth', task = 1)
# if __name__ == '__main__':
#     model_names = ['rf', 'svm', 'lr']
#     if sys.argv[1] == 'train':
#         all = ['train']
#         for name in model_names:
#             feedTasks('train', all, method = name)
#     elif sys.argv[1] == 'test':
#         models0 = []
#         models1 = []
#         models2 = []
#         for name in model_names:
#             ans0,g0 = feedTasks('eval', ['train'], method = name)
#             ans1,g = feedTasks('eval', ['eval'], method = name)
#             ans2,dummy = feedTasks('test', ['test'], method = name)
#             models0.append(ans0)
#             models1.append(ans1)
#             models2.append(ans2)
        # e = Esembler()
        # e.createLinearEsemble(models0, g0)
        # ans = e.esemble(models1)
        # tr = Translate()
        # tr.translate(ans, task = 1)
