import os
import openai
import sys
import json
from youtube_transcript_api import YouTubeTranscriptApi
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# global variables

# Rough heuristic: ~4 characters of English text per model/API token.
# Used to estimate token counts from string lengths without running a tokenizer.
average_length_per_token=4 
# Upper bound on the estimated tokens per chunk sent to the OpenAI API
# (kept well under the model's context window to leave room for the response).
max_token_per_chunk=1900

# Max input tokens per sequence for the local translation tokenizer.
max_length=1024
# NOTE(review): max_new_tokens is defined but not passed to generate() below —
# TODO confirm whether it was meant to cap the translation output length.
max_new_tokens=1024

# a function to translate English to Chinese
# the input: a list of paragraphs: each paragraph is a string
# the output: a list of translated paragraphs
def translateEnglishToChinese(englishText):
    """Translate a list of English paragraphs to Chinese.

    Uses the Helsinki-NLP/opus-mt-en-zh model, translating batch_size
    paragraphs at a time. Batched translation is faster and avoids the
    stray characters that appeared when translating one paragraph at a time.

    Args:
        englishText: list of English paragraph strings.

    Returns:
        list of translated paragraph strings, in the same order (inputs
        longer than max_length tokens are truncated by the tokenizer).
    """
    batch_size = 10

    # Load the tokenizer and model ONCE. The original code reloaded both
    # from disk inside the loop, i.e. once per batch of 10 paragraphs,
    # which re-read the model weights repeatedly for no benefit.
    tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-zh")
    model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-zh")

    translatedText = []
    for i in range(0, len(englishText), batch_size):
        batch = englishText[i:i+batch_size]
        inputs = tokenizer(batch, return_tensors="pt",
                           max_length=max_length, truncation=True, padding=True)
        batch_results = model.generate(**inputs)
        # decode token ids back into plain text
        decoded_batch_results = tokenizer.batch_decode(batch_results, skip_special_tokens=True)
        translatedText.extend(decoded_batch_results)

    return translatedText

# function to get the transcript of a youtube video
# Input : youtube video id, language code
# Output : list of lines of the transcript, or None when no transcript exists
def getTranscript(video_id, lang):
    """Fetch the transcript of *video_id* in language *lang*.

    Side effects: saves the raw transcript as "<video_id>.<lang>.json"
    and the extracted text as "<video_id>.transcript.<lang>" (one caption
    per line).

    Returns:
        list of caption strings, or None if the API returned no transcript.
    """
    transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[lang])
    if not transcript:
        return None

    # save the raw transcript as an intermediate result
    outfile = video_id + "." + lang + ".json"
    with open(outfile, 'w', encoding='utf-8') as json_file:
        json.dump(transcript, json_file)

    # extract just the caption text from each transcript entry
    str_list = [entry['text'] for entry in transcript]

    # Write the extracted text, one caption per line. Use the lang parameter
    # in the filename (the original hard-coded ".en") and explicit UTF-8 —
    # the platform default encoding may not handle all caption characters.
    with open(video_id + ".transcript." + lang, "w", encoding='utf-8') as file:
        for line in str_list:
            file.write(line + "\n")  # one caption per output line
    return str_list

# a function to call OpenAI API to process a chunk of text and return the result
# output is a string
def callOpenAI(chunk):
    """Ask the OpenAI completion API to add punctuation to *chunk*.

    Returns the completion text, or "" (after printing an error) when the
    chunk's estimated token count exceeds max_token_per_chunk.
    """
    # cheap size guard before spending an API call
    estimated_tokens = len(chunk) / average_length_per_token
    if estimated_tokens > max_token_per_chunk:
        print("Error: chunk is too long: with %d tokens" % estimated_tokens)
        return ""

    prompt_text = "add punctuations into the following text: \n" + chunk

    #openai.organization = ''
    openai.api_key = os.getenv("OPENAI_API_KEY")
    response = openai.Completion.create(
        model="text-davinci-002",
        prompt=prompt_text,
        temperature=0,
        # response token budget; should really be 4000 - prompt token count
        max_tokens=max_token_per_chunk,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )

    # only the text field of the first choice is of interest
    return response.choices[0].text

# a function to combine lines of text into chunks
# each chunk is a string of up to max_tokens estimated tokens
def combineLinesIntoChunks(text, avg_chars_per_token=4, max_tokens=1900):
    """Greedily pack lines into chunks of at most ~max_tokens tokens.

    Token counts are estimated as len(line) / avg_chars_per_token; no real
    tokenizer is run. The default parameter values match the module-level
    constants (average_length_per_token, max_token_per_chunk), so existing
    single-argument callers behave as before.

    Args:
        text: iterable of strings, packed in order.
        avg_chars_per_token: heuristic characters-per-token ratio.
        max_tokens: estimated token budget per chunk.

    Returns:
        list of non-empty chunk strings. A single line that alone exceeds
        the budget still becomes its own (oversized) chunk.
    """
    chunks = []
    token_count = 0
    chunk = ""  # current candidate chunk being filled
    for line in text:
        extra_tokens = len(line) / avg_chars_per_token
        if token_count + extra_tokens > max_tokens:
            # Flush the current chunk and start a new one with this line.
            # Guard against appending an empty chunk, which the original
            # code did when the very first line already exceeded the budget.
            if chunk:
                chunks.append(chunk)
            chunk = line
            token_count = extra_tokens
        else:
            chunk += line
            token_count += extra_tokens
    if chunk != "":
        chunks.append(chunk)

    return chunks

# a function to add punctuations to a list of lines
# several lines are combined into a chunk of text to fit the OpenAI API's limit
# the result is a list of paragraphs with punctuations
# file name is the name of the file to store the result of adding punctuations
# the name convention is: video_id.transcript.en.punctuated.txt
def addPunctuations(str_list, file_name):
    """Punctuate *str_list* via the OpenAI API and save the result.

    Lines are packed into API-sized chunks first; chunks for which
    callOpenAI returns "" are dropped. The punctuated paragraphs are
    written to *file_name* and also returned as a list.
    """
    paragraphs = [
        punctuated
        for punctuated in (callOpenAI(c) for c in combineLinesIntoChunks(str_list))
        if punctuated != ""
    ]

    # persist the punctuated text — it is the input to the translation stage
    with open(file_name, 'w', encoding='utf-8') as f:
        f.writelines(paragraphs)
        #f.write("\n")
    return paragraphs


# a function to translate the paragraphs to Chinese
# input is the video id and the file holding the punctuated English text
# side effect: writes the translation to "<video_id>.transcript.zh"
def translateParagraphsToChinese(video_id, en_punc_file_name):
    """Translate the punctuated English transcript file to Chinese.

    Reads *en_punc_file_name* (UTF-8), strips whitespace and drops empty
    lines, translates the remaining paragraphs, and writes them to
    "<video_id>.transcript.zh" separated by blank lines.
    """
    # somehow using the list of paragraphs directly as input does not work,
    # so read the file back and use its lines as input instead
    with open(en_punc_file_name, 'r', encoding='utf-8') as f:
        text = f.readlines()

    # must remove both the leading and trailing whitespaces, and empty lines,
    # otherwise the translation will have extra characters
    text = [line.strip() for line in text]
    text = [line for line in text if line]

    print("Number of non-empty paragraphs: " + str(len(text)))
    translatedText = translateEnglishToChinese(text)

    # Write the translated text. Explicit UTF-8 is required here: the
    # original relied on the platform default encoding, which raises
    # UnicodeEncodeError on Chinese text under e.g. Windows cp1252.
    with open(video_id + ".transcript" + ".zh", "w", encoding='utf-8') as file:
        for line in translatedText:
            file.write(line + "\n\n")  # blank line between paragraphs
    
# the main function
def main():
    """Run the pipeline: fetch transcript -> punctuate (OpenAI) -> translate to Chinese."""
    if len(sys.argv) != 2:
        print("Usage: python3 allInOne.py <youtube_url>")
        sys.exit(1)

    # TODO: try to grab Chinese transcript from youtube first
    # 1. get the transcript of the video: save it to a json file
    # possible lang values are: ['en', 'de', 'es', 'fr', 'it', 'ja', 'ko', 'nl', 'pt', 'ru', 'zh']
    print("1/3: Getting the transcript of the video...")
    video_url = sys.argv[1]
    # guard: the original split("v=")[1] raised IndexError on urls without "v="
    if "v=" not in video_url:
        print("Error: could not find a 'v=' video id in the url:", video_url)
        sys.exit(1)
    video_id = video_url.split("v=")[1]
    video_id = video_id.split("&")[0] # trim possible &t=xxx etc
    str_list = getTranscript(video_id, 'en')
    # guard: getTranscript returns None when no transcript exists; fail here
    # with a clear message instead of crashing in the punctuation stage
    if not str_list:
        print("Error: no English transcript available for video:", video_id)
        sys.exit(1)
    # report the actual filename getTranscript writes (<id>.en.json); the
    # original message misreported it as <id>.transcript.en.json
    print("Transcript is saved to a json file named:", video_id + ".en.json")
    print("Transcript is also saved to a text file named:", video_id + ".transcript.en")

    # 2. call OpenAI API to process each chunk and save the result to a paragraph with punctuations
    print("2/3: Adding punctuations to the transcript...")
    final_input_file_name = video_id + ".transcript.en.punctuated.txt"
    paragraphs = addPunctuations(str_list, final_input_file_name)
    print("Punctuated transcript is saved to a file named:", final_input_file_name)

    # 3. translate the paragraphs to Chinese
    print("3/3: Translating the transcript to Chinese...")
    translateParagraphsToChinese(video_id, final_input_file_name)
    print("Translated transcript is saved to a file named:", video_id + ".transcript.zh")

# run the pipeline only when executed as a script, not when imported as a module
if __name__ == "__main__":
    main()
