import tiktoken
from llama_index.core import Settings
from llama_index.core.callbacks import TokenCountingHandler, CallbackManager
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from transformers import AutoTokenizer

# Global LLM used by LlamaIndex for response synthesis / text generation.
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)

# Global embedding model; embed_batch_size controls how many texts are
# batched into a single embedding API request.
Settings.embed_model = OpenAIEmbedding(
    model="text-embedding-3-small", embed_batch_size=100
)

# Default chunking parameters applied when documents are split.
Settings.chunk_size = 512
Settings.chunk_overlap = 20

# Explicit ingestion transformations.
# NOTE(review): this SentenceSplitter uses chunk_size=1024, which differs
# from Settings.chunk_size=512 above — confirm the mismatch is intentional
# (explicit transformations take precedence during ingestion).
Settings.transformations = [SentenceSplitter(chunk_size=1024)]

# Tokenizer used for token counting; it should match the active LLM.
# For OpenAI models, use the tiktoken encoding for that model:
Settings.tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo").encode

# open-source

# BUG FIX: the original assigned to the misspelled attribute
# `Settings.tokenzier`, a silent no-op — the Hugging Face tokenizer was
# never actually installed. Assign the correctly spelled attribute, and
# pass the `.encode` bound method (a Callable[[str], list[int]]), which is
# the form Settings.tokenizer expects.
# NOTE(review): this overrides the tiktoken tokenizer set above; keep only
# the assignment that matches the LLM configured in Settings.llm.
Settings.tokenizer = AutoTokenizer.from_pretrained(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
).encode

# Track token usage (LLM + embedding calls) via the callback system;
# counts are read back from `token_counter` after queries run.
token_counter = TokenCountingHandler()
Settings.callback_manager = CallbackManager([token_counter])

# maximum input size to the LLM
Settings.context_window = 4096

# number of tokens reserved for text generation.
Settings.num_output = 256
