#encoding=utf-8
import kaldiio
import torch
import torchvision
#import torchaudio
from torch import nn, optim, Tensor
#import data_prepare_by_name
import numpy as np
import logging
import codecs
from tqdm import tqdm
import os, sys
import random
import argparse
import time

#import data_prepare
#from data_load import W2LData_byname, collate_fn, collate_fn_chunk, ChunkData, compute_dev_ratio, compute_exp_lr_base
#from data_prepare_by_name import generate_data
from sklearn.model_selection import train_test_split
from torch.utils.data.dataloader import DataLoader
from model import *
#from torchaudio.models import Wav2Letter


def make_priors(train_data, net, label_dim):
    """Estimate per-class label priors by accumulating softmax posteriors.

    Runs every utterance in ``train_data`` through ``net``, applies a softmax
    over the class axis, and sums the posteriors per class across all frames
    and utterances.  The accumulated vector is normalized to sum to 1 and
    returned as the prior estimate.

    Args:
        train_data: sequence of ``(feats_scp_path, labels_scp_path)`` pairs.
            Labels are loaded only to truncate feats/labels to a common length.
        net: torch module, already on GPU and in eval mode.  Its output is
            permuted from what looks like (N, T, C) to (N, C, T) before the
            softmax — TODO confirm the net's actual output layout.
        label_dim: number of output classes (length of the returned vector).

    Returns:
        numpy array of shape ``(label_dim,)`` whose entries sum to 1.0.
    """
    vec_sum = np.zeros(label_dim)
    spend_time = 0.0
    # Hoisted out of the loop: the softmax module is loop-invariant.
    # dim=1 is the class axis after the permute below.
    softmax_func = nn.Softmax(dim=1)

    # No gradients are needed for prior estimation; no_grad stops autograd
    # graph construction and keeps GPU memory flat over the whole dataset.
    with torch.no_grad():
        for i in tqdm(range(len(train_data))):
            voice = kaldiio.load_scp(train_data[i][0])
            label = kaldiio.load_scp(train_data[i][1])

            voice = np.asarray(list(voice.values()))
            label = np.asarray(list(label.values()))
            # Feats and labels may disagree in utterance count; keep the
            # common prefix so the two stay aligned.
            min_len = min(voice.shape[0], label.shape[0])
            voice = voice[:min_len]
            label = label[:min_len]
            voice, label = Tensor(voice), Tensor(label)
            voice, label = voice.cuda(), label.cuda()

            st_time = time.time()
            output = net(voice)
            spend_time += time.time() - st_time
            print(spend_time, i, voice.shape)

            # Move classes onto dim=1 so the softmax normalizes over classes.
            output = output.permute(0, 2, 1)
            soft_output = softmax_func(output).detach().cpu().numpy()
            # Collapse batch and time axes, leaving one value per class.
            vec_sum += np.sum(soft_output, axis=(0, 2))

    # Normalize so the priors form a probability distribution.
    vec_sum = vec_sum / np.sum(vec_sum)
    print("Priors vec is done", spend_time)
    return vec_sum


if __name__ == '__main__':
    # Usage: prog <feats> <labels> <net> <input_dim> <label_dim>
    if len(sys.argv) != 6:
        print('{} {} {} {} {} {}\n'.format(sys.argv[0], "<feats>", "<labels>", "<net>", "<input_dim>", "<label_dim>"))
        sys.exit(-1)
    feats_scp_list, labels_list, final_mdl, input_dim, label_dim = sys.argv[1:]

    # Validate both list files up front and report which path is missing
    # (the original only checked the feats list, and without a message).
    for required_path in (feats_scp_list, labels_list):
        if not os.path.exists(required_path):
            raise FileNotFoundError(required_path)

    # Pair each feats scp path with its label scp path, line by line.
    with codecs.open(filename=feats_scp_list, mode='r') as f1, \
            codecs.open(filename=labels_list, mode='r') as f2:
        feats_lines = f1.readlines()
        labels_lines = f2.readlines()
        assert len(feats_lines) == len(labels_lines)
        feats_list = [(feat.strip(), lab.strip())
                      for feat, lab in zip(feats_lines, labels_lines)]

    # Echo the first pair as a sanity check, then the total count.
    if feats_list:
        print('feats {}\n'.format(feats_list[0]))
    print('{} scps read\n'.format(len(feats_list)))

    cell_dim = 128
    # int(), not eval(): CLI arguments are untrusted input and must be
    # plain integers — eval() would execute arbitrary code.
    input_dim, label_dim = int(input_dim), int(label_dim)
    net = BLSTM3(input_dim, cell_dim, label_dim)
    # The checkpoint was saved from a DataParallel-wrapped model, so the
    # state dict keys carry the "module." prefix; wrap before loading.
    net = nn.DataParallel(net)
    net.load_state_dict(torch.load(final_mdl))
    net = net.cuda()
    net.eval()

    prior_filearkname = final_mdl + ".prior.ark"
    prior_filescpname = final_mdl + ".prior.scp"
    priors = make_priors(feats_list, net, label_dim)
    print(priors)
    print(priors.shape)
    # Write the prior vector in Kaldi ark/scp format next to the model.
    kaldiio.save_ark(prior_filearkname, {"prior": priors}, scp=prior_filescpname)
    print('priors is done !\n')