import pickle
from utils import *
from model import *
import os
import sys
from sklearn import ensemble
import random

def init_empty_model():
    """Return a fresh, untrained regressor (one is built per training run)."""
    regressor = ensemble.RandomForestRegressor(n_estimators=20)
    return regressor

from optparse import OptionParser

# NOTE: the usage line previously advertised a "--dir" option that does not
# exist; the real options (added in parse_argv) are --training-dataset / --model.
usage = "train.py --training-dataset=TRAINING_SET_DIR --model=MODDIR"
parser = OptionParser(usage)

def parse_argv():
    """Register CLI options on the module-level parser and parse sys.argv.

    Returns:
        (data_dir, model_dir) tuple of strings (either may be None if the
        corresponding option was omitted).

    Side effects:
        Prints help and exits with status 0 when no arguments are given.
    """
    parser.add_option("-a", "--training-dataset", action="store", metavar="DIR",
                      type="string", dest="data_dir",
                      help="directory for training set")
    # help text below was a copy-paste of the option above; fixed to describe
    # the model output directory
    parser.add_option("-m", "--model", action="store", metavar="DIR",
                      type="string", dest="model_dir",
                      help="directory for model output")
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(0)
    options, _args = parser.parse_args()
    return options.data_dir, options.model_dir


def load_train(input_dir, model_dir, trainfile="train1", testfrom=20171201):
    """Load the training dataframe, generating the cached CSV if missing.

    Args:
        input_dir: directory holding the raw input CSV; only the first file
            returned by os.listdir is used (assumes a single raw file —
            TODO confirm with the data-prep pipeline).
        model_dir: directory where the processed "<trainfile>.csv" cache lives.
        trainfile: basename (without extension) of the cached training CSV.
        testfrom: upper bound (inclusive) on the integer 'date' column; rows
            after this date are held out for testing.

    Returns:
        A pandas DataFrame with rows whose 'date' <= testfrom.
    """
    final_file = os.path.join(model_dir, trainfile + ".csv")
    if not os.path.exists(final_file):
        print("generating data ~~~")
        # Only touch input_dir when we actually need to (re)generate the cache.
        raw_files = os.listdir(input_dir)
        if not raw_files:
            raise FileNotFoundError(
                "no raw training file found in {}".format(input_dir))
        df = pd.read_csv(os.path.join(input_dir, raw_files[0]))
        # Hard cutoff on raw data; adjust when newer data becomes available.
        df_train = df[df['date'] <= 20191201]
        # fill_and_shift writes "<trainfile>.csv" (and a .fil file) to model_dir.
        fill_and_shift(df_train, os.path.join(model_dir, trainfile), shift_days=28)
    else:
        print("reading available")
    dfdf = pd.read_csv(final_file)
    dfdf = dfdf[dfdf['date'] <= testfrom]
    print(len(dfdf))
    return dfdf




def run_training(df, category_fields, numeric_fields, yfield, model_dir):
    """Train a regressor predicting *yfield* and pickle model + encoder.

    Args:
        df: training DataFrame (already feature-engineered).
        category_fields: categorical column names (one-hot encoded downstream).
        numeric_fields: numeric column names used as-is.
        yfield: target column name; also used as the output file basename.
        model_dir: directory receiving "<yfield>.model" and "<yfield>.ohe".
    """
    # Build the estimator once and reuse it — the original code constructed a
    # second throwaway estimator just to print its repr.
    model_template = init_empty_model()
    print("training using model:{}".format(repr(model_template)))

    xfields = category_fields + numeric_fields

    # filter_samples drops rows unusable for this target (project helper).
    x_train_df, y_train = filter_samples(df, xfields, yfield)
    print("train ({} -> {}) samples on [{}] ".format(len(df), len(x_train_df), yfield))

    x_train, encoder = convert_traindf_to_matrix(x_train_df, category_fields, numeric_fields)

    model = training(x_train, y_train, model_template)

    print("dumping model")
    with open(os.path.join(model_dir, yfield + ".model"), 'wb') as f:
        pickle.dump(model, f)
    print("dumping encoder")
    with open(os.path.join(model_dir, yfield + ".ohe"), 'wb') as f:
        pickle.dump(encoder, f)



if __name__ == "__main__":
    input_dir, model_dir = parse_argv()

    if not os.path.exists(model_dir):
        os.mkdir(model_dir)

    df = load_train(input_dir, model_dir, testfrom=20171201)
    # feature_engineering caches to tmp.csv; delete it to force a re-compute.
    df = feature_engineering(df, reload=True)

    category_fields = ['month', 'city', 'station']
    numeric_fields = ['days']
    avg_fields = ['humidity', 'temperature', 'cloud', 'rain']
    numeric_fields += get_avg_nfields(avg_fields, FD, WS, SP)
    print(numeric_fields)

    # Train one model per target, in order.
    for yfield in ['temperature', 'humidity']:
        run_training(df, category_fields, numeric_fields, yfield, model_dir)