import os

import gradio as gr
import openai
from openai import OpenAI
import base64
from http.server import HTTPServer, BaseHTTPRequestHandler
from io import BytesIO

from dataclasses import replace
from http.server import HTTPServer, BaseHTTPRequestHandler
import json,os,re,chardet,posixpath,codecs
from urllib.parse import unquote
import cgi
from cgi import parse_header, parse_multipart
from urllib.parse import parse_qs
import cv2
from moviepy.editor import VideoFileClip
import datetime
import shutil
# SECURITY: hard-coded API key committed to source. This key is exposed and
# should be revoked; load it from an environment variable or a secrets store
# instead of embedding it in the file.
api_key='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'
openai_model = 'gpt-4o'  # model name used for all chat completions below
prompt = ""  # module-level placeholder; shadowed by function parameters below

# Both client styles are configured: the new client object (used everywhere
# below) and the legacy module-level key on `openai` (not used below).
client = OpenAI(api_key=api_key)
openai.api_key = api_key

# When True, openai_completion() returns a canned stub instead of calling the
# API — used for offline testing.
testing = False
def encode_file_base64(file_path):
    """Read the file at *file_path* and return its bytes base64-encoded as str."""
    with open(file_path, 'rb') as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode('utf-8')
def decode_file_base64(base64string, extension='mp4'):
    """Decode a base64 string and write it to a uniquely named file in the cwd.

    Parameters:
        base64string: base64-encoded file contents.
        extension: file extension for the output name (default 'mp4', matching
            the original behavior; callers decoding audio may override it).

    Returns:
        The path of the file that was written.
    """
    filedata = base64.b64decode(base64string.encode('utf-8'))
    # BUG FIX: the name previously had only second resolution, so two calls
    # within the same second (e.g. video + audio in one request) clobbered
    # each other's output. %f appends microseconds to keep names unique.
    stamp = datetime.datetime.now().strftime('%m-%d-%Y-%H-%M-%S-%f')
    savepath = f'{stamp}.{extension}'
    with open(savepath, "wb") as binary_file:
        binary_file.write(filedata)
    return savepath

def process_video(video_path, seconds_per_frame=2):
    """Sample frames from a video as base64 JPEGs and extract its audio track.

    Parameters:
        video_path: path to a video file on disk.
        seconds_per_frame: sampling interval in seconds between frames.

    Returns:
        (base64Frames, audio_path): a list of base64-encoded JPEG strings and
        the path of the extracted MP3, or None when the video has no audio
        track — callers that unconditionally delete audio_path should check.
    """
    base64Frames = []
    base_video_path, _ = os.path.splitext(video_path)

    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    # BUG FIX: max(1, ...) prevents an infinite loop — some containers report
    # fps == 0, and fps * seconds_per_frame < 1 truncates to a zero step, so
    # curr_frame would never advance.
    frames_to_skip = max(1, int(fps * seconds_per_frame))
    curr_frame = 0

    # Loop through the video and extract frames at the specified sampling rate.
    while curr_frame < total_frames - 1:
        video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame)
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
        curr_frame += frames_to_skip
    video.release()

    # Extract audio from the video. BUG FIX: silent videos have
    # clip.audio == None, which previously raised AttributeError.
    audio_path = None
    clip = VideoFileClip(video_path)
    if clip.audio is not None:
        audio_path = f"{base_video_path}.mp3"
        clip.audio.write_audiofile(audio_path, bitrate="32k")
        clip.audio.close()
    clip.close()

    print(f"Extracted {len(base64Frames)} frames")
    print(f"Extracted audio to {audio_path}")
    return base64Frames, audio_path

def openai_completion(
    prompt,
    message_history,
    sysprompt = None,
    imagedata = None,
    imagetype = '',
    video = '',
    audio = '',
    temperature=0.9,
    max_tokens=150,
    top_p=1,
    frequency_penalty=0,
    presence_penalty=0.6,
):
    """Assemble a chat request from the given inputs and return the reply message.

    Parameters:
        prompt: the user's text prompt (may contain '{transcription}' when
            audio is supplied).
        message_history: prior messages; MUTATED IN PLACE — the system and
            user messages are appended to it, and callers rely on that.
        sysprompt: optional system prompt.
        imagedata / imagetype: base64 image payload and its image subtype.
        video: list of base64 JPEG frames (as produced by process_video).
        audio: transcription object exposing a `.text` attribute, or falsy.
        temperature..presence_penalty: forwarded sampling parameters.

    Returns:
        The assistant message object (with .role and .content), or a stub
        with content 'testing' when the module-level `testing` flag is set.
    """
    message = message_history  # alias: appends below are visible to the caller

    if sysprompt is not None and len(sysprompt) > 0:
        # FIX: the system prompt was previously appended AFTER the history;
        # chat models expect it at the start of the conversation.
        message.insert(0, {"role": "system", "content": sysprompt})

    uploadMessage = {'role': 'user'}

    # Exactly one content shape is chosen, in priority order:
    # image > video frames > audio transcription > plain text.
    if imagedata is not None and len(imagedata) > 0:
        uploadMessage['content'] = [
            {'type': 'text', 'text': prompt},
            {'type': 'image_url',
             'image_url': {'url': f'data:image/{imagetype};base64,{imagedata}'}},
        ]
    elif video is not None and len(video) > 0:
        uploadMessage['content'] = [
            prompt,
            *({"type": "image_url",
               "image_url": {"url": f'data:image/jpg;base64,{frame}',
                             "detail": "low"}}
              for frame in video),
        ]
    elif audio:
        # BUG FIX: was `elif audio is not None`, which matched the default
        # empty string and crashed on `audio.text` for plain-text calls.
        prompt = prompt.replace('{transcription}', audio.text)
        uploadMessage['content'] = [{"type": "text", "text": prompt}]
    else:
        uploadMessage['content'] = prompt

    message.append(uploadMessage)

    if testing:
        # Offline stub mirroring the attributes the caller reads.
        class TRsp():
            def __init__(self):
                self.role = ''
                self.history = ''
                self.content = 'testing'

        return TRsp()

    response = client.chat.completions.create(
        model=openai_model,
        messages=message,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
    )
    return response.choices[0].message

def chatgpt(
    prompt,
    sysprompt,
    imagedata,
    imagetype,
    video,
    audio,
    message_history,
    temperature=0.9,
    max_tokens=150,
    top_p=1,
    frequency_penalty=0,
    presence_penalty=0.6,
):
    """Run one chat turn: trim the history, query the model, record the reply.

    Returns a (reply_text, updated_history) tuple; message_history is also
    mutated in place when a list is passed in.
    """
    message_history = message_history or []

    # Bound the prompt size by keeping only the four most recent messages.
    del message_history[:-4]

    # Delegate the actual API call to openai_completion.
    reply = openai_completion(
        prompt,
        message_history,
        sysprompt,
        imagedata,
        imagetype,
        video,
        audio,
        temperature,
        max_tokens,
        top_p,
        frequency_penalty,
        presence_penalty,
    )

    message_history.append({'role': reply.role, 'content': reply.content})
    return reply.content, message_history


class AIHTTPRequestHandler(BaseHTTPRequestHandler):
    """HTTP front-end exposing /tts (text-to-speech) and /gpt4o (chat) endpoints.

    Requests are JSON POST bodies; responses are JSON. GET and HEAD return a
    trivial HTML page as a liveness check.
    """

    def _set_headers(self):
        # 200/HTML headers for the GET/HEAD liveness responses.
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()

    def _html(self, message):
        """Wrap *message* in a minimal HTML page and return it as UTF-8 bytes."""
        content = f"<html><body><h1>{message}</h1></body></html>"
        return content.encode("utf8")

    def do_GET(self):
        self._set_headers()
        self.wfile.write(self._html("GET REQUEST"))

    def do_HEAD(self):
        self._set_headers()

    def _send_json(self, status, payload):
        """Serialize *payload* to JSON and send it with the given HTTP status."""
        self.send_response(status)
        self.send_header('Content-Type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(payload).encode())

    def do_POST(self):
        """Dispatch a JSON POST body to the /tts or /gpt4o handler."""
        print('do_post')
        postvars = self.parse_POST()
        # Only JSON bodies are supported; other content types come back from
        # parse_POST as dicts, which json.loads cannot take.
        if not isinstance(postvars, str):
            self._send_json(400, {'error': 'expected an application/json body'})
            return
        json_object = json.loads(postvars)

        if self.path == "/tts":
            self._handle_tts(json_object)
        elif self.path == '/gpt4o':
            self._handle_gpt4o(json_object)
        else:
            # BUG FIX: an unknown path previously hit an unbound response
            # variable and crashed with NameError.
            self._send_json(404, {'error': f'unknown path {self.path}'})

    def _handle_tts(self, json_object):
        """Synthesize speech for json_object['prompt'] in the requested voice.

        Responds 200 with {'mp3': <base64-encoded MP3>} on success.
        """
        if 'voice' not in json_object or 'prompt' not in json_object:
            # BUG FIX: a missing 'voice' previously left the response body
            # unbound and crashed with NameError.
            self._send_json(400, {'error': "'prompt' and 'voice' are required"})
            return
        response = client.audio.speech.create(
            model="tts-1",
            voice=json_object['voice'],
            input=json_object['prompt'])

        tempTTSPath = './tts_temp.mp3'
        response.stream_to_file(tempTTSPath)

        base64File = encode_file_base64(tempTTSPath)
        # NOTE: a leftover debug round-trip (decode_file_base64) that wrote a
        # junk file on every request has been removed.
        # BUG FIX: success responses were previously sent with status 500.
        self._send_json(200, {'mp3': base64File})

    def _handle_gpt4o(self, json_object):
        """Run a chat completion, optionally with image, video, or audio input.

        Expected keys: 'prompt' (required); optional 'sys', 'history',
        'image' (+ 'imagetype'), 'video' (base64 MP4), 'audio' (base64 file).
        Responds 200 with {'content': ..., 'history': ...}.
        """
        if 'prompt' not in json_object:
            self._send_json(400, {'error': "'prompt' is required"})
            return
        prompt = json_object['prompt']
        sys = json_object.get('sys', "")
        history = json_object.get('history', [])
        image = json_object.get('image', '')
        imagetype = json_object.get('imagetype', '')
        video = None
        audio = None

        if 'video' in json_object:
            # Write the upload to disk, sample one frame per second, then
            # delete the temporary media files.
            video_file_path = decode_file_base64(json_object['video'])
            video, audio_path = process_video(video_path=video_file_path,
                                              seconds_per_frame=1)
            os.remove(video_file_path)
            os.remove(audio_path)

        if 'audio' in json_object:
            # Transcribe the upload with Whisper; the transcription object
            # (with a .text attribute) is forwarded to chatgpt().
            audio_file_path = decode_file_base64(json_object['audio'])
            with open(audio_file_path, "rb") as f:
                audio = client.audio.transcriptions.create(
                    model="whisper-1",
                    file=f)
            os.remove(audio_file_path)

        content, history = chatgpt(prompt, sys, image, imagetype, video, audio, history)
        # BUG FIX: success responses were previously sent with status 500.
        self._send_json(200, {'content': content, 'history': history})

    def parse_POST(self):
        """Read and parse the request body according to its Content-Type.

        Returns the raw JSON string for application/json, a field dict for
        multipart or url-encoded forms, and {} for anything else.
        """
        ctype, pdict = parse_header(self.headers['content-type'])
        print(self.headers)

        # BUG FIX: the original used two separate `if` statements followed by
        # `elif ctype != 'application/x-www-form-urlencoded'`, so a multipart
        # body fell through to parse_qs on an already-consumed stream, and the
        # inverted `!=` meant urlencoded bodies were never parsed at all.
        if ctype == 'multipart/form-data':
            postvars = cgi.parse_multipart(self.rfile, pdict)
        elif ctype == 'application/json':
            length = int(self.headers['content-length'])
            postvars = self.rfile.read(length).decode('utf8')
        elif ctype == 'application/x-www-form-urlencoded':
            length = int(self.headers['content-length'])
            postvars = parse_qs(self.rfile.read(length), keep_blank_values=1)
        else:
            postvars = {}
        return postvars
if __name__ == "__main__":
    # Listen on every interface, port 8088, and serve until interrupted.
    address = ('0.0.0.0', 8088)
    server = HTTPServer(address, AIHTTPRequestHandler)
    server.serve_forever()
       