#-*-encoding:utf8-*-

import pandas as pd

def format_srcDataset_to_dict(src):
    """Group source rows by their first field.

    Returns a dict mapping row[0] -> list of the remaining fields
    (row[1:]) for every row in *src*, preserving input order within
    each group.
    """
    grouped = {}
    for row in src:
        grouped.setdefault(row[0], []).append(row[1:])
    return grouped


def format_targetDataset_to_dict(target):
    """Map '<row[0]>_<row[1]>' composite keys to row[2] target values.

    For each row in *target* the key is built from the first two
    fields.  If the same key occurs more than once the last row wins
    (matching the original always-overwrite behaviour).
    """
    _dict = {}
    for row in target:
        # Build the composite key once per row.  The original code built
        # it twice and also called setdefault(..., 0), which was dead
        # work because the value was unconditionally overwritten on the
        # very next line.
        _dict[str(row[0]) + '_' + str(row[1])] = row[2]
    return _dict


def sample_specified(full_dict, area_id):
    """Filter *full_dict* down to the records belonging to *area_id*.

    *full_dict* maps a key (e.g. a time string) to a list of records;
    a record matches when its first element equals *area_id*.  Keys
    with no matching record are omitted from the result entirely.
    """
    _dict = {}
    # Iterate items directly instead of the original
    # `for i in range(len(full_dict[keys]))` index loop.
    for key, records in full_dict.items():
        for record in records:
            # record[0] holds the area identifier.
            if record[0] == area_id:
                _dict.setdefault(key, []).append(record)
    return _dict


#---------------------------analysis dict structure
def analysis_sample_dataset_dict(_dict):
    """Print the dict's entries in ascending key order (debug helper)."""
    # sorted() works on any mapping; the original `_dict.keys()` +
    # `.sort()` only worked on Python 2, where keys() returned a list.
    for key in sorted(_dict):
        # Call-form print with a single pre-formatted string behaves
        # identically on Python 2 and 3, matching the original
        # `print i,':',_dict[i]` output.
        print('%s : %s' % (key, _dict[key]))


#*******************************make matrix to sklearn***********************
def convert_srcdict_to_matrix(dataset_dict, area_ci_sample_list, areaId):
    """Convert per-timestamp sample rows into CSV-string feature vectors.

    For every timestamp key in *dataset_dict* build one comma-joined
    value string with one column per ci in sorted(area_ci_sample_list).
    A sample row matches a ci column when str(row[2]) + '_' + str(row[1])
    equals the ci; its row[3] becomes the column value (last matching
    row wins).  Columns with no matching row default to 0.

    Returns a dict mapping '<timestamp>_<areaId>' -> 'v1,v2,...,vN'.
    """
    # sorted() copies, so the caller's list is never reordered in place.
    area_ci_ls = sorted(area_ci_sample_list)

    _matrix = {}
    for timestr, rows in dataset_dict.items():
        # One value per ci, defaulting to 0 when no sample row matches.
        values = {ci: 0 for ci in area_ci_ls}
        # Single pass over the rows.  The original wrapped this scan in
        # a redundant outer `for i in dataset_dict[timestr]` loop whose
        # variable was never used, repeating the whole scan per row for
        # no effect; it also kept a dead sorted key list (_ls_keys).
        for row in rows:
            ci = str(row[2]) + '_' + str(row[1])
            if ci in values:
                values[ci] = row[3]
        _matrix[timestr + '_' + str(areaId)] = ','.join(
            str(values[ci]) for ci in area_ci_ls)
    return _matrix


def convert_dict_to_list(_dict):
    """Turn a dict of comma-separated number strings into int lists.

    Each value in *_dict* is split on ',', every piece stripped and
    cast to int; one inner list per dict entry, in key-iteration order.
    """
    return [
        [int(field.strip()) for field in csv_row.split(',')]
        for csv_row in _dict.values()
    ]


def join_train_dataSets(X, Y, bak_filename):
    """Join feature rows X with target values Y column-wise.

    X and Y are paired by list position: pd.concat(..., axis=1,
    join='inner') keeps only index labels present in both series.
    The joined frame is also written to *bak_filename* as a CSV backup.

    Returns the frame as a nested dict: {0: {idx: x}, 1: {idx: y}}.
    """
    features = pd.Series(X)
    targets = pd.Series(Y)
    joined = pd.concat([features, targets], join='inner', axis=1)
    # Keep an on-disk copy alongside the returned in-memory dict.
    joined.to_csv(bak_filename)
    return joined.to_dict()


def separate_dataSets_into_XY(_set_dict):
    """
    Separate a joined dataset dict back into features X and targets Y.

    *_set_dict* is the {0: {time_key: 'v1,v2,...'}, 1: {time_key: y}}
    structure produced by join_train_dataSets().  Column 0 values are
    parsed from CSV strings into int lists; column 1 values are taken
    as-is.  Also prints the set of time keys seen (debug aid).

    Returns the pair (_X, _Y).
    """
    _X = []
    _Y = []
    _times = set()
    for col in _set_dict:
        for time_key in _set_dict[col]:
            if col == 0:
                _times.add(time_key)
                _X.append([int(v) for v in _set_dict[col][time_key].split(',')])
            elif col == 1:
                _Y.append(_set_dict[col][time_key])
    # Call-form print works on both Python 2 and 3; the original used
    # the Python 2 print statement, a syntax error under Python 3.
    print('\ntimes in dataset:\n %s' % (_times,))
    return _X, _Y

