"""
 According to the png from /png, I got that sampling distribution is nat balanced,
 to handle this problem, construting a balanced set is in first.
 When training sampling 100, testing sampling 200, deving sampling 200.
 If amout of each catogray less than this basic line ,generate sampling with sample_gererator.
"""
import os
import random

import numpy as np
import torch

def sava_tensors(_tensor, label, num, model, idx, project_path):
    """Slice the per-category tensors/counts/labels for one split and save them.

    Parameters
    ----------
    _tensor, num, label : per-category containers with 4 categories; each
        element is itself a sequence of samples.
        # NOTE(review): callers in this file pass numpy arrays shaped
        # (4, N, ...) — confirm against other call sites.
    model : str
        "train" or "dev"; selects the slice boundaries and the output dir.
    idx : sequence of int
        idx[0]:idx[1] delimits the train slice; the dev slice runs from
        idx[1] to the second-to-last sample.
    project_path : str
        Root directory; output goes to <project_path>/Processed/<model>/.

    Side effects: creates the output directory if needed and writes
    tensor.npy, num.npy and label.npy into it.
    """
    # Slice boundaries per split. The -1 for "dev_2" intentionally drops the
    # last sample (mirrored by the trailing -1 in min_len below).
    # (renamed from `list`, which shadowed the builtin)
    bounds = {"train_1": idx[0], "train_2": idx[1], "dev_1": idx[1], "dev_2": -1}
    tensor_file = project_path + "/Processed/" + model + "/tensor.npy"
    num_file = project_path + "/Processed/" + model + "/num.npy"
    label_file = project_path + "/Processed/" + model + "/label.npy"
    dir_path = project_path + "/Processed/" + model + "/"
    if os.path.exists(dir_path):
        print(dir_path + " is existed")
    else:
        os.makedirs(dir_path)
        print("mkdir " + dir_path)
    tensor_ = []
    num_ = []
    label_ = []
    # Common length to truncate every category to, so np.array() below can
    # stack the 4 categories into one rectangular array.
    if model == "train":
        min_len = idx[1]
    else:
        min_len = min(len(_tensor[0]), len(_tensor[1]),
                      len(_tensor[2]), len(_tensor[3])) - idx[1] - 1
    # Hoist the loop-invariant slice boundaries out of the loop.
    lo = bounds[model + "_1"]
    hi = bounds[model + "_2"]
    for i in range(4):
        tensor_.append(np.array(_tensor[i][lo:hi])[:min_len])
        num_.append(np.array(num[i][lo:hi])[:min_len])
        label_.append(np.array(label[i][lo:hi])[:min_len])
    print(tensor_[0].shape, tensor_[1].shape, tensor_[2].shape, tensor_[3].shape)
    np.save(tensor_file, np.array(tensor_))
    np.save(num_file, np.array(num_))
    np.save(label_file, np.array(label_))
    print(model + " saved ! \n")

def generator(category, num, idx, project_path):
    """Build a class-balanced subset and save the train/dev splits.

    Loads the precomputed word tensors, word counts and labels from
    <project_path>/vec2tensor/, groups the samples by label keeping at most
    `num` samples per category, then delegates to sava_tensors to write the
    "train" and "dev" splits under <project_path>/Processed/.

    Parameters
    ----------
    category : int
        Number of label classes to collect (labels outside [0, category)
        are ignored).
    num : int
        Per-category sample cap.
    idx : sequence of int
        Slice boundaries forwarded to sava_tensors.
    project_path : str
        Project root containing the vec2tensor/ inputs.
    """
    # Load the precomputed numpy arrays.
    w2vnp = np.load(project_path + "/vec2tensor/w2vtensor.npy")
    w2numnp = np.load(project_path + "/vec2tensor/w2num.npy")
    labelnp = np.load(project_path + "/vec2tensor/labels.npy")
    # Per-category buckets.
    wordnp = [[] for _ in range(category)]
    numnp = [[] for _ in range(category)]
    _labelnp = [[] for _ in range(category)]
    # Single pass over the data (the original rescanned the whole dataset once
    # per class): bucket each sample by its label, capping buckets at `num`.
    for x, y, z in zip(labelnp, w2vnp, w2numnp):
        c = int(x)
        if 0 <= c < category and len(wordnp[c]) < num:
            wordnp[c].append(y)
            numnp[c].append(z)
            _labelnp[c].append(c)
    # np.float / np.long were removed in NumPy 1.24+ (AttributeError today);
    # use the concrete dtypes they used to alias.
    wordnp = np.array(wordnp, dtype=np.float64)
    _labelnp = np.array(_labelnp, dtype=np.int64)
    numnp = np.array(numnp, dtype=np.int64)
    print(wordnp.shape)
    sava_tensors(wordnp, _labelnp, numnp, "train", idx, project_path)
    sava_tensors(wordnp, _labelnp, numnp, "dev", idx, project_path)