import wave
from pydub import AudioSegment
import os
import numpy as np
import struct
import soundfile as sf


def mono2stero2(in_file, out_path):
    """Convert a mono wav file to stereo by duplicating the channel.

    Parameters
    ----------
    in_file : str
        Path to the source (mono) wav file.
    out_path : str
        Directory where the stereo file is written as 'temp.wav'.
    """
    # Read once: sf.read returns (data, samplerate).  The original read
    # the file twice and threw the sample rate away.
    data, samplerate = sf.read(in_file)

    # Duplicate the mono signal into two columns, one per stereo channel
    # (equivalent to vstack + transpose, in a single step).
    stereo = np.column_stack((data, data))

    out_file = os.path.join(out_path, 'temp.wav')
    # Preserve the source sample rate instead of hard-coding 44100;
    # otherwise inputs at other rates play back at the wrong speed.
    sf.write(out_file, stereo, samplerate=samplerate)



def mono2stero(in_file, out_path):
    """Down-mix a 16-bit stereo wav file to mono by averaging the channels.

    The output keeps the source sample rate and sample width and is
    written to ``out_path`` under the input file's base name.

    Parameters
    ----------
    in_file : str
        Path to the source stereo wav file (16-bit PCM, 2 channels).
    out_path : str
        Directory the mono wav file is written to.
    """
    # Derive the base name from the path string directly; the original
    # opened the file just to read its name back out of the handle.
    base_name = os.path.splitext(os.path.basename(in_file))[0]

    # Context manager guarantees the handle is closed even on error.
    with wave.open(in_file, 'rb') as wf:
        framerate = wf.getframerate()
        sample_width = wf.getsampwidth()
        raw = wf.readframes(wf.getnframes())

    # np.fromstring is removed in modern numpy; frombuffer is the
    # zero-copy replacement.  Wav PCM is little-endian, so be explicit
    # with '<i2'.  Interleaved samples -> (n, 2) -> one row per channel.
    samples = np.frombuffer(raw, dtype='<i2').reshape(-1, 2).T

    # Widen to int32 before adding so the sum cannot wrap around int16,
    # then /2 and cast — the float->int cast truncates toward zero,
    # matching the original per-sample int() conversion.
    left = samples[0].astype(np.int32)
    right = samples[1].astype(np.int32)
    mono = ((left + right) / 2).astype('<i2')

    out_file = os.path.join(out_path, base_name + '.wav')
    with wave.open(out_file, 'wb') as wf_mono:
        wf_mono.setnchannels(1)
        wf_mono.setframerate(framerate)
        wf_mono.setsampwidth(sample_width)
        # Write the whole buffer in one call instead of one struct.pack
        # per sample; '<i2' bytes match the '<h' packing used before.
        wf_mono.writeframes(mono.tobytes())

if __name__ == '__main__':
    # Stereo-duplication demo; the stereo->mono direction is available
    # as mono2stero(...) with the same arguments.
    # mono2stero("../audio/low battery.wav", "../output")
    mono2stero2("../audio/low battery.wav", "../output")
