import os
from dotenv import load_dotenv
import time, datetime

# Populate os.environ from a local .env file at import time; every helper
# below reads its configuration (ollama_url, model_name, log_level, ...)
# from environment variables.
load_dotenv()

def get_llm():
    """Build an Ollama LLM client from environment configuration.

    Reads ``ollama_url``, ``model_name`` and ``num_thread`` from the
    environment (loaded via dotenv at module import).

    Returns:
        A configured ``langchain.llms.Ollama`` instance.
    """
    from langchain.llms import Ollama
    # /opt/conda/lib/python3.11/site-packages/langchain_community/llms/ollama.py

    # os.getenv returns a string (or None); Ollama's num_thread expects an
    # int, so convert when the variable is set and fall back to None otherwise.
    num_thread = os.getenv("num_thread")

    llm = Ollama(
        base_url=os.getenv("ollama_url"),
        model=os.getenv("model_name"),
        num_thread=int(num_thread) if num_thread else None,
    )

    return llm

def get_embeddings():
    """Build an Ollama embeddings client from environment configuration.

    Reads ``ollama_url``, ``embedding_model`` and ``num_thread`` from the
    environment (loaded via dotenv at module import).

    Returns:
        A configured ``OllamaEmbeddings`` instance.
    """
    from langchain.embeddings.ollama import OllamaEmbeddings

    # os.getenv returns a string (or None); num_thread must be an int,
    # so convert when set — mirrors the handling in get_llm().
    num_thread = os.getenv("num_thread")

    embeddings = OllamaEmbeddings(
        base_url=os.getenv("ollama_url"),
        model=os.getenv("embedding_model"),
        num_thread=int(num_thread) if num_thread else None,
    )

    return embeddings

def get_output_parser():
    """Return a plain-string output parser for use at the end of a chain."""
    from langchain_core.output_parsers import StrOutputParser

    return StrOutputParser()

def get_env(key: str, default_value: str):
    """Return the value of environment variable *key*, or *default_value* if unset.

    Note: an environment variable set to the empty string is returned as-is;
    only a missing variable falls back to the default.
    """
    value = os.getenv(key)

    # PEP 8: compare to None with `is`, not `==`.
    if value is None:
        value = default_value

    return value

def is_trace_enabled():
    """Return True only when the ``log_level`` env var is exactly "trace"."""
    return os.getenv("log_level") == "trace"

def is_debug_enabled():
    """Return True when ``log_level`` is "debug" or the more verbose "trace"."""
    return os.getenv("log_level") in ("debug", "trace")

def is_info_enabled():
    """Return True when ``log_level`` is "info" or any more verbose level."""
    return os.getenv("log_level") in ("info", "debug", "trace")

def trace(msg: str):
    """Print *msg* immediately, but only when trace-level logging is active."""
    if not is_trace_enabled():
        return
    print(msg, flush=True)

def debug(msg: str):
    """Print *msg* immediately, but only when debug-level logging is active."""
    if not is_debug_enabled():
        return
    print(msg, flush=True)

def info(msg: str):
    """Print *msg* immediately, but only when info-level logging is active."""
    if not is_info_enabled():
        return
    print(msg, flush=True)

def error(msg: str):
    """Print *msg* unconditionally — errors ignore the configured log_level."""
    print(msg, flush=True)

# def get_env(key: str):
#     value = os.getenv(key)
#     return value
    
def chain_invoke(chain, query):
    """Run ``chain.invoke(query)`` once, printing its result and wall-clock timing."""
    print(f"Start at    ... {datetime.datetime.now()}", flush=True)
    started = time.time()

    # The chain's output is printed rather than returned.
    print(chain.invoke(query), flush=True)

    elapsed = time.time() - started
    print(f"\nComplete at ... {datetime.datetime.now()}", flush=True)
    print(f'Total time used {elapsed} seconds', flush=True)

def chain_stream(chain, query):
    """Stream ``chain.stream(query)`` chunks to stdout with wall-clock timing."""
    print(f"Start at    ... {datetime.datetime.now()}", flush=True)
    started = time.time()

    # Chunks are printed as they arrive, without separators, and flushed
    # so the output appears incrementally.
    for piece in chain.stream(query):
        print(piece, end="", flush=True)

    elapsed = time.time() - started
    print(f"\nComplete at ... {datetime.datetime.now()}", flush=True)
    print(f'Total time used {elapsed} seconds')


def chain_starts():
    """Print a start banner and return the start timestamp (epoch seconds).

    Pair the returned value with chain_completes() to report elapsed time.
    """
    print(f"Start at    ... {datetime.datetime.now()}", flush=True)
    return time.time()

def chain_completes(time1):
    """Print a completion banner plus seconds elapsed since *time1*.

    *time1* is the epoch timestamp returned by chain_starts().
    """
    elapsed = time.time() - time1
    print(f"\nComplete at ... {datetime.datetime.now()}", flush=True)
    print(f'Total time used {elapsed} seconds\n')

def chain_batch(chain, queries):
    """Run ``chain.batch(queries)``, printing each result and wall-clock timing."""
    print(f"Start at    ... {datetime.datetime.now()}", flush=True)
    started = time.time()

    # One printed line (or more) per query result, in batch order.
    for item in chain.batch(queries):
        print(item, flush=True)

    elapsed = time.time() - started
    print(f"\nComplete at ... {datetime.datetime.now()}", flush=True)
    print(f'Total time used {elapsed} seconds', flush=True)

    