import os

import numpy as np
import tensorflow as tf
from sklearn.preprocessing import StandardScaler

os.environ["CUDA_VISIBLE_DEVICES"] = '2'  # pin TensorFlow to GPU #2 on this machine
count = 0          # running total of TFRecord examples written (all splits)
count_collect = 0  # running total of 'neu' utterances collected for the scaler

# =============== config ========================
# Input directory: per-utterance spectrogram .npy files grouped by session,
# plus one '<Session>_sentence_label' text file per session with lines of the
# form "<sentence_id> <label>".
npy_dir = '/home/ddy/projects/emotions/iemocap_4emo_spectrogram'

# Output directory for the normalized TFRecord files (created if missing).
tfrecord_dir = '/home/ddy/projects/emotions/iemocap_4emo_spectr_norm_limitlen'

# npy_dir = '/Users/d/Project/emotions/data/iemocap_4emo_spectrogram_simple'
#
# tfrecord_dir = '/Users/d/Project/emotions/data/iemocap_4emo_spectr_norm_simple_limitlen'

# Output file names for the three dataset splits.
train_tfrecord = 'train.tfrecords'
vali_tfrecord = 'vali.tfrecords'
test_tfrecord = 'test.tfrecords'

# IEMOCAP session split: sessions 1-3 train, 4 validation, 5 test.
train_sessions = ['Session1', 'Session2', 'Session3']
vali_sessions = ['Session4']
test_sessions = ['Session5']

# The four emotion classes, mapped to integer labels 0..3 in list order.
emos = ['neu', 'ang', 'hap', 'sad']
emos_dict = dict(zip(emos, range(4)))

# Utterances whose spectrogram has more than this many rows (time frames —
# assumed; TODO confirm axis meaning) are skipped when writing TFRecords.
limit_len = 1200


# ===============================================


def collect_neu_data(session_name):
    """Load every neutral ('neu') spectrogram of one IEMOCAP session.

    Reads the session's '<session_name>_sentence_label' file (one
    "<sentence_id> <label>" pair per line) and loads the .npy spectrogram
    of each utterance labeled 'neu'. Increments the module-level
    ``count_collect`` counter and prints progress as a side effect.

    Args:
        session_name: session directory name, e.g. 'Session1'.

    Returns:
        List of numpy arrays, one spectrogram per neutral utterance.
    """
    global count_collect
    label_filepath = os.path.join(npy_dir, session_name + '_sentence_label')
    neu_data_list = list()
    with open(label_filepath, 'r') as l_f:
        for line in l_f:
            eles = line.split()
            # Test the parsed label field instead of a substring match on the
            # whole line: 'neu' in line would also fire on sentence ids that
            # happen to contain "neu" and on malformed lines. The two-field
            # guard matches the parsing done in process_dataset().
            if len(eles) == 2 and eles[1] == 'neu':
                count_collect += 1
                print('collect neu count', count_collect)
                npy_f = os.path.join(npy_dir, session_name, eles[0] + '.npy')
                spectr = np.load(npy_f)
                neu_data_list.append(spectr)
    return neu_data_list


def get_scaler(session_names):
    """Fit a StandardScaler on the neutral utterances of the given sessions.

    Collects all 'neu' spectrograms from every session in *session_names*,
    stacks them row-wise, and fits a per-feature StandardScaler on the result.

    Args:
        session_names: iterable of session directory names.

    Returns:
        A fitted sklearn StandardScaler.
    """
    collected = []
    for name in session_names:
        collected.extend(collect_neu_data(name))
    print('vstack neu ...')
    stacked = np.vstack(collected)
    print('transform')
    return StandardScaler().fit(stacked)


def _byte_feature(value):
    """Wrap a single bytes value in a tf.train.Feature holding a BytesList."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)


def _int64_feature(value):
    """Wrap a single integer value in a tf.train.Feature holding an Int64List."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)


def process_an_example(norm_np, sentence_str, label):
    """Serialize one normalized utterance into a tf.train.Example byte string.

    Args:
        norm_np: normalized spectrogram as a numpy array; its first axis
            length is stored as the 'len' feature (time frames — assumed;
            TODO confirm axis meaning).
        sentence_str: sentence id, stored as a UTF-8 byte string.
        label: emotion label string; must be a key of ``emos_dict``.

    Returns:
        The serialized tf.train.Example (bytes).

    Raises:
        KeyError: if *label* is not one of the four known emotions.
    """
    length = norm_np.shape[0]
    label_int = emos_dict[label]
    example = tf.train.Example(features=tf.train.Features(feature={
        # tobytes() replaces ndarray.tostring(), which was deprecated in
        # NumPy 1.19 and removed in NumPy 2.0; the produced bytes are
        # identical.
        'data': _byte_feature(norm_np.astype(np.float32).tobytes()),
        'len': _int64_feature(length),
        'sentence_id': _byte_feature(str.encode(sentence_str)),
        'label': _int64_feature(label_int)
    }))
    return example.SerializeToString()


def process_dataset(session_names, record_name, scaler):
    """Normalize each utterance of the given sessions and write one TFRecord file.

    For every "<sentence_id> <label>" line in each session's label file, loads
    the utterance's .npy spectrogram, skips it when its first-axis length
    exceeds ``limit_len``, normalizes it with *scaler*, and appends the
    serialized example to ``tfrecord_dir/record_name``. Increments the
    module-level ``count`` counter and prints progress as a side effect.

    Args:
        session_names: iterable of session directory names.
        record_name: output TFRecord file name inside ``tfrecord_dir``.
        scaler: fitted sklearn StandardScaler used for normalization.
    """
    global count
    if not os.path.exists(tfrecord_dir):
        os.makedirs(tfrecord_dir)
    record_path = os.path.join(tfrecord_dir, record_name)
    with tf.python_io.TFRecordWriter(record_path) as writer:
        for session in session_names:
            label_path = os.path.join(npy_dir, session + '_sentence_label')
            with open(label_path, 'r') as label_f:
                for raw_line in label_f:
                    fields = raw_line.split()
                    # Skip blank or malformed lines.
                    if len(fields) != 2:
                        continue
                    sentence_id, emo_label = fields
                    spectr = np.load(
                        os.path.join(npy_dir, session, sentence_id + '.npy'))
                    # Drop over-long utterances instead of truncating them.
                    if spectr.shape[0] > limit_len:
                        continue
                    norm_spectr = scaler.transform(spectr)
                    writer.write(
                        process_an_example(norm_spectr, sentence_id, emo_label))
                    count += 1
                    print('write record', count)


def main():
    """Fit the scaler on the training sessions, then write all three splits."""
    scaler = get_scaler(train_sessions)
    splits = (
        (train_sessions, train_tfrecord),
        (vali_sessions, vali_tfrecord),
        (test_sessions, test_tfrecord),
    )
    for sessions, record_name in splits:
        process_dataset(sessions, record_name, scaler)


if __name__ == '__main__':
    # Run the full conversion pipeline when executed as a script.
    main()
