

import glob
import os
import pickle
import random
import numpy as np


def CreateMap(charFile):
    """Build symbol<->label index maps from a newline-separated vocabulary file.

    Args:
        charFile: path to a text file with one symbol per line.

    Returns:
        (s2lDict, l2sDict): symbol -> index dict and index -> symbol dict.
        Note: a trailing newline in the file yields an '' entry, as before.
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(charFile) as f:
        charTable = f.read().split('\n')
    s2lDict = {c: i for i, c in enumerate(charTable)}
    l2sDict = {i: c for i, c in enumerate(charTable)}
    return s2lDict, l2sDict

def ReadContent(contentPath):
    """Parse "<key> <sentence>" lines into a dict keyed by the first token.

    Args:
        contentPath: path to a text file, one "key sentence" pair per line.

    Returns:
        dict mapping key -> remainder of the line (empty lines skipped).
    """
    contentDict = {}
    # 'with' closes the handle (original left it open).
    with open(contentPath, 'r') as f:
        lines = f.read().split('\n')
    for line in lines:
        if line == '':
            continue
        key, sent = line.split(' ', 1)
        contentDict[key] = sent
    return contentDict

def CreateMapTable(tablePath):
    """Parse "<key> <value>" table lines into a dict.

    Args:
        tablePath: path to the table text file, one "key value" pair per line.

    Returns:
        dict mapping the first token -> remainder of each non-empty line.
    """
    # 'with' closes the handle (original left it open).
    with open(tablePath) as f:
        lines = f.read().split('\n')
    mapTable = {}
    for line in lines:
        if line == '':
            continue
        key, value = line.split(' ', 1)
        mapTable[key] = value
    return mapTable

def CreateDataPath(dataDir, mode, mapTable):
    """List every entry under <dataDir>/<mode>/ into dataset/<mode>.txt.

    Each written line is the entry's path relative to dataDir, with '/'
    separators.

    Args:
        dataDir: root data directory.
        mode: sub-directory to scan ('train'/'test'); also the output stem.
        mapTable: name -> sentence table; looked up for every file (a
            missing key raises KeyError), though the sentence is not written.
    """
    dataDir = dataDir + '/'
    pattern = f'{dataDir}{mode}/*'
    savePath = f'dataset/{mode}.txt'

    # 'with' ensures the output file is flushed/closed even on error.
    with open(savePath, 'w') as saveFile:
        for path in glob.glob(pattern):
            childPath = path.replace(os.sep, '/').replace(dataDir, '')
            saveFile.write(childPath)
            saveFile.write('\n')

            # NOTE(review): saveStr is built but never written — looks like
            # dead code or a missing write. The mapTable lookup is kept so a
            # missing key still raises, exactly as before.
            fileName = childPath.split('/')[-1].split('_')[0]
            sent = mapTable[fileName]
            saveStr = childPath + ' ' + sent

def getTestPathFromDir(dirPath, trainPath):
    """Collect file names under dirPath whose word prefix (before '_')
    appears in the training list, and write them to dataset/test.txt.

    Any existing dataset/test.txt is renamed to dataset/test_old.txt
    before writing.
    """
    vocab = {name.split('_')[0] for name in readDataFromFile(trainPath)}

    testNames = []
    for rawPath in glob.glob(f'{dirPath}/*'):
        fileName = rawPath.replace(os.sep, '/').split('/')[-1]
        if fileName.split('_')[0] in vocab:
            testNames.append(fileName)

    saveTestPath = 'dataset/test.txt'
    # Back up a previous test split instead of overwriting it.
    if os.path.exists(saveTestPath):
        os.rename(saveTestPath, saveTestPath.split('.')[0] + "_old.txt")

    with open(saveTestPath, 'w') as f:
        for name in testNames:
            f.write(name)
            f.write('\n')

def CreateContent(corpus, mapTable):
    """Write "relative/path sentence" lines for every *.wav under corpus
    into dataset/content.txt.

    Args:
        corpus: directory containing the wav files.
        mapTable: word -> sentence map, keyed by the file-name prefix
            before '_' (a missing key raises KeyError, as before).
    """
    corpus += '/'
    pattern = f'{corpus}*.wav'
    contPath = 'dataset/content.txt'
    # 'with' closes the output file even if a mapTable lookup raises.
    with open(contPath, 'w') as saveFile:
        for path in glob.glob(pattern):
            path = path.replace(os.sep, '/').replace(corpus, '')
            prefix = path.split('/')[-1].split('_')[0]
            saveFile.write(path + ' ' + mapTable[prefix])
            saveFile.write('\n')

def createContentFromFilePath(trainFilePath, testFilePath, mapTable):
    """Write "path sentence" lines for every path listed in the train and
    test path files into dataset/content.txt (train entries first).

    Args:
        trainFilePath: text file of training paths, one per line.
        testFilePath: text file of test paths, one per line.
        mapTable: word -> sentence map, keyed by the file-name prefix
            before '_' (a missing key raises KeyError, as before).
    """
    trainPaths = readDataFromFile(trainFilePath)
    testPaths = readDataFromFile(testFilePath)

    contPath = 'dataset/content.txt'
    # The original duplicated the body for train and test; one loop over
    # the concatenation (train first) is equivalent. 'with' closes the file.
    with open(contPath, 'w') as saveFile:
        for path in trainPaths + testPaths:
            path = path.replace(os.sep, '/')
            name = path.split('/')[-1].split('_')[0]
            saveFile.write(path + ' ' + mapTable[name])
            saveFile.write('\n')

def SplitDataPath(dataDir, sentTable, validPercent, isGeneratePath = True):
    """Randomly split the *.wav files under dataDir into train/test name
    lists, stratified per sentence prefix, and optionally persist them to
    dataset/train.txt and dataset/test.txt.

    Args:
        dataDir: directory containing the wav files.
        sentTable: text file of sentence/word prefixes, one per line.
        validPercent: fraction (0..1) of each prefix's files used for test.
        isGeneratePath: when False, skip writing and report the files exist.
    """
    pattern = f'{dataDir}/*.wav'
    # 'with' closes the handle (original leaked it via open().read()).
    with open(sentTable, 'r') as f:
        sents = [s for s in f.read().split('\n') if s != '']

    fileNames = [path.split(os.sep)[-1] for path in glob.glob(pattern)]
    print(fileNames)

    trainNames = []
    testNames = []
    for sent in sents:
        # Files whose prefix (before '_') matches this sentence.
        curList = [name for name in fileNames if sent == name.split('_')[0]]
        curLen = len(curList)
        validNum = int(validPercent * curLen)
        print(f'validnum is: {validNum}')
        # Random index sample picks the validation (test) members.
        select = random.sample(range(0, curLen), validNum)
        for k in range(curLen):
            if k in select:
                testNames.append(curList[k])
            else:
                trainNames.append(curList[k])

    # Persist the split to text files.
    if not os.path.exists('dataset'):
        os.makedirs('dataset')

    saveTrainPath = 'dataset/train.txt'
    saveTestPath = 'dataset/test.txt'

    if isGeneratePath:
        with open(saveTrainPath, 'w') as f:
            for name in trainNames:
                f.write(name)
                f.write('\n')
    else:
        print("train path file already existed")

    if isGeneratePath:
        with open(saveTestPath, 'w') as f:
            for name in testNames:
                f.write(name)
                f.write('\n')
    else:
        print("test path file already existed")

def Sent2Label(sent, netType, vocabType, s2lDict, labMaxLen):
    """Encode a sentence as a label for the given network/vocabulary type.

    Supported combinations: ('word', 'ed'), ('word', 'conv'/'rnn'),
    ('char', 'ctc'). Any other combination returns None.
    """
    if vocabType == 'word':
        words = sent.split(' ')
        if netType == 'ed':
            return EDs2l(words, s2lDict, labMaxLen)
        if netType in ('conv', 'rnn'):
            return Convs2l(words, s2lDict)
        return None
    if vocabType == 'char' and netType == 'ctc':
        return CTCs2l(sent, s2lDict, labMaxLen)
    return None

def CTCs2l(sent, s2lDict, labMaxLen):
    """Encode a character sentence as a fixed-length CTC label vector,
    padded on the right with the index of the space character."""
    # float dtype matches the original np.ones(...) * index form.
    label = np.full((labMaxLen,), s2lDict[' '], dtype=float)
    for pos, ch in enumerate(sent):
        label[pos] = s2lDict[ch]
    return label

def EDs2l(clips, s2lDict, labMaxLen):
    """Encode word clips as a fixed-length encoder-decoder label vector:
    SOS index first, then the word indices, right-padded with EOS."""
    # float dtype matches the original np.ones(...) * index form.
    label = np.full((labMaxLen,), s2lDict['EOS'], dtype=float)
    label[0] = s2lDict['SOS']
    for pos, word in enumerate(clips, start=1):
        label[pos] = s2lDict[word]
    return label

def Convs2l(clips, s2lDict):
    """Map each word in clips to its label index and return the list."""
    return [s2lDict[word] for word in clips]

def LoadData(dataPath):
    """Unpickle and return the object stored at dataPath.

    NOTE(review): pickle.load on untrusted files can execute arbitrary
    code — only use with trusted data.
    """
    with open(dataPath, 'rb') as handle:
        return pickle.load(handle)


def CTCTrans(tensor, l2sDict):
    """Decode a CTC output tensor to a sentence via l2sDict.

    Placeholder — not implemented yet; currently returns None.
    """
    pass

def EDTrans(tensor, l2sDict):
    """Decode an encoder-decoder label tensor back into a sentence.

    SOS/EOS tokens contribute nothing; symbols of length <= 1 are joined
    directly, longer symbols get a trailing space. The result is stripped
    of surrounding whitespace.
    """
    pieces = []
    for idx in range(tensor.shape[0]):
        symbol = l2sDict[tensor[idx].item()]
        if symbol in ('SOS', 'EOS'):
            # Same effect as mapping the marker to '' in the original.
            continue
        pieces.append(symbol if len(symbol) <= 1 else symbol + ' ')
    return ''.join(pieces).strip()

def ConvTrans(tensor, l2sDict):
    """Translate a single-label tensor into its symbol string."""
    return l2sDict[tensor.item()]

def WordTrans(tensor, l2sDict):
    """Decode a word-label tensor into a space-separated sentence,
    skipping SOS/EOS markers. A trailing space follows every word except
    one decoded at the final tensor position (matching the original)."""
    maxLen = tensor.shape[0]
    parts = []
    for idx in range(maxLen):
        word = l2sDict[tensor[idx].item()]
        if word in ('SOS', 'EOS'):
            continue
        parts.append(word if idx == maxLen - 1 else word + ' ')
    return ''.join(parts)


def findPathWithVocab(allPath, vocab:dict):
    """Keep only the paths whose file-name prefix (before '_') is a key
    of vocab."""
    return [p for p in allPath if p.split('/')[-1].split('_')[0] in vocab]

def getAndFiltPath(filePath):
    """Read filePath and return its non-empty lines.

    NOTE(review): duplicates readDataFromFile — consider consolidating.

    Args:
        filePath: path to a text file.

    Returns:
        list of lines with empty lines removed.
    """
    # 'with' closes the handle (original leaked it via open().read()).
    with open(filePath, 'r') as f:
        return [line for line in f.read().split('\n') if line != '']


def getLessTrainPath(paths, s2lDict, trainRate, samplesPerVocab=24):
    """Randomly pick a fraction of each vocabulary word's file paths.

    One random index sample of size int(samplesPerVocab * trainRate) is
    drawn once and reused for every word (matching the original behavior).

    Args:
        paths: candidate paths; a path's word is its file-name prefix
            before '_'.
        s2lDict: vocabulary dict (only its keys are used).
        trainRate: fraction of samplesPerVocab to keep per word.
        samplesPerVocab: assumed recordings per word (was hard-coded 24;
            now a backward-compatible parameter).

    Returns:
        list of the selected paths.
    """
    trainNumber = int(samplesPerVocab * trainRate)
    select = random.sample(range(0, samplesPerVocab), trainNumber)
    trainPath = []
    for curVocab in s2lDict.keys():
        curPath = [p for p in paths
                   if p.split('/')[-1].split('_')[0] == curVocab]
        for num in select:
            trainPath.append(curPath[num])
    return trainPath

def updateConfuseMatrix(matrix, predSent, targetSent, s2lDict):
    """Accumulate per-character confusion counts.

    For each position i in predSent, increments
    matrix[label(targetSent[i])][label(predSent[i])]. Mutates matrix in
    place and returns it. Iterates over len(predSent), so a shorter
    targetSent raises, exactly as before.
    """
    for pos in range(len(predSent)):
        row = s2lDict[targetSent[pos]]
        col = s2lDict[predSent[pos]]
        matrix[row][col] += 1
    return matrix

def composePath(trainPath, testPath):
    """Concatenate the train and test path lists (train first) and write
    them, one per line, to dataset/all_path.txt."""
    allLines = getAndFiltPath(trainPath) + getAndFiltPath(testPath)
    savePath = 'dataset/all_path.txt'
    with open(savePath, 'w') as f:
        f.writelines(f'{line}\n' for line in allLines)


def readDataFromFile(filePath):
    """Read filePath and return its non-empty lines.

    Args:
        filePath: path to a text file.

    Returns:
        list of lines with empty lines removed.
    """
    # 'with' closes the handle (original leaked it via open().read()).
    with open(filePath, 'r') as f:
        return [line for line in f.read().split('\n') if line != '']

def getAllPathFromDir(dirPath, fileFormat):
    """Return every path under dirPath matching the glob pattern
    fileFormat (e.g. '*.wav')."""
    return list(glob.glob(os.path.join(dirPath, fileFormat)))

def pickleSaveData(data, savePath):
    """Serialize data with pickle and write it to savePath."""
    with open(savePath, 'wb') as handle:
        pickle.dump(data, handle)

# Generate path files for multiple datasets