import json
import logging
import os
import time

import openai
# --- Configuration -----------------------------------------------------------

# Maximum number of debate rounds before the script exits.
round_limit = 20

# Read the API key from the environment instead of hard-coding a secret in
# source (hard-coded keys leak via version control). The old literal is kept
# only as a fallback so existing setups keep the same behavior; export
# OPENAI_API_KEY before running.
openai.api_key = os.environ.get('OPENAI_API_KEY', 'sk-xxxx')

# System prompts defining the two debater personas. Each list accumulates the
# full conversation history for its model as the debate proceeds.
msgs_musk = [ {"role": "system", "content": "You are a debater that can emulate Elon Musk. You are innovative, entrepreneurial and futuristic. You possess a deep knowledge of the technology industry and future trends, often think outside the box and are willing to take risks. You have a casual tone, a good sense of humor and often come up with big, inspirational ideas."}, ]

msgs_einstein = [ {"role": "system", "content": "You are a debater that can emulate Albert Einstein. You are brilliant, insightful and deeply thoughtful, with a vast understanding of physics and the mysteries of the universe. Though you often speak on complex topics, you strive to explain things in a way that non-scientists can understand. You are also humble, kind, and occasionally humorous."}, ]

# Opening prompt that seeds the first round of the debate.
prompt = "Hello, Let's debate some controversial topic in the US like gun control. Let Musk take the opposing side and Einstein take the support side. Besides offering unique personal opinions, both sides must also cite credible studies and elaborate on the relevant constitution amendment. Let's begin the debate:"

# Persist the transcript to a log file in addition to stdout.
logging.basicConfig(filename='conversation.log', level=logging.INFO)

# Calling ChatGPT API
# Add answer to the msgs list, with a size limit
def getAnswerFromAModel(msgs_a: list, msg: str, selected_model: str) -> str:
    """Send *msg* to the chat model and return the model's reply text.

    *msgs_a* is mutated in place: the user message and the assistant reply
    are both appended so the list accumulates the conversation history, and
    the oldest exchange is trimmed once the history grows too large.

    Args:
        msgs_a: running list of ``{"role", "content"}`` message dicts;
            index 0 is the system prompt and is never trimmed.
        msg: the user message to send this turn.
        selected_model: model name passed to the API (e.g. ``'gpt-4'``).

    Returns:
        The assistant's reply, stripped of leading/trailing whitespace.
    """
    msgs_a.append({"role": "user", "content": msg})

    response = openai.ChatCompletion.create(
        model=selected_model,
        messages=msgs_a,
        temperature=0.8,
    )['choices'][0]['message']['content'].strip()

    msgs_a.append({"role": "assistant", "content": response})

    # Rough token estimate: whitespace-separated words of the serialized
    # history. This underestimates real token usage — TODO: use a proper
    # tokenizer (e.g. tiktoken) for an accurate count.
    token_count = len(json.dumps(msgs_a).split())

    # Trim the oldest user/assistant exchange so the history stays bounded.
    # BUG FIX: the original did pop(1) then pop(2), which — because the list
    # shifts after the first pop — removed the elements at original indices
    # 1 and 3, breaking the user/assistant turn alignment. Popping index 1
    # twice removes one consecutive pair and keeps the system prompt at 0.
    if len(msgs_a) >= 25 or token_count > 8000:
        msgs_a.pop(1)  # oldest user message (index 0 is the system prompt)
        msgs_a.pop(1)  # the assistant reply that immediately followed it

    return response
  

counter = 1
while True:

    try:
        # Musk's turn: respond to the current prompt.
        response1 = getAnswerFromAModel(msgs_musk, prompt, 'gpt-4')
        print(f'========round {counter}============')
        print('Musk:', response1)
        print('.....................')
        logging.info(f'========round {counter}============')
        logging.info('Musk: %s', response1)
        logging.info('.....................')
        # Musk's reply becomes the prompt for Einstein's turn.
        prompt = response1
    except openai.error.RateLimitError as e:
        # NOTE(review): `retry_after` is not a documented attribute of the
        # old SDK's RateLimitError — fall back to a fixed wait if absent.
        wait = getattr(e, 'retry_after', 30)
        print(f"Rate limit reached. Waiting for {wait} seconds...")
        time.sleep(wait + 1)
        # BUG FIX: the original fell through here, so on the first round
        # `response1` was undefined (NameError below) and on later rounds a
        # stale reply was silently reused. Retry the round instead.
        continue

    # Pause between calls to stay under the API rate limit.
    time.sleep(10)

    try:
        # Einstein's turn: respond to Musk's latest argument.
        response2 = getAnswerFromAModel(msgs_einstein, prompt, 'gpt-4')

        print('Einstein:', response2)
        # BUG FIX: the original logged response1 (Musk's reply) under the
        # "Einstein" label.
        logging.info('Einstein: %s', response2)
        # Einstein's reply seeds the next round.
        prompt = response2
    except openai.error.RateLimitError as e:
        wait = getattr(e, 'retry_after', 30)
        print(f"Rate limit reached. Waiting for {wait} seconds...")
        time.sleep(wait + 1)
        continue  # retry Einstein's turn next iteration (prompt unchanged)

    counter += 1
    if counter > round_limit:
        break
