# Moin_Von_Bremen / app.py
# import packages
import os
import copy
import shutil
from io import BytesIO

import gradio as gr
import numpy as np
import requests
from PIL import Image
from tqdm import tqdm

from llama_cpp import Llama
from huggingface_hub import hf_hub_download

import chromadb
from chromadb.config import Settings
from chromadb.utils import embedding_functions
from chromadb.utils.embedding_functions import OpenCLIPEmbeddingFunction
from chromadb.utils.data_loaders import ImageLoader

from datasets import load_dataset
from transformers import pipeline
from bark import SAMPLE_RATE, generate_audio, preload_models
# Initialize the Llama model
llm = Llama(
    ## Original model:
    # model_path=hf_hub_download(
    #     repo_id="microsoft/Phi-3-mini-4k-instruct-gguf",
    #     filename="Phi-3-mini-4k-instruct-q4.gguf",
    # ),
    ## Compressed (Q2_K-quantized) model:
    model_path=hf_hub_download(
        repo_id="TheBloke/CapybaraHermes-2.5-Mistral-7B-GGUF",
        filename="capybarahermes-2.5-mistral-7b.Q2_K.gguf",
    ),
    n_ctx=2048,       # context window size in tokens
    n_gpu_layers=50,  # layers offloaded to GPU; adjust based on your VRAM
)
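# Hedged sketch: a minimal helper showing how this Llama instance can be
# queried via llama-cpp-python's chat API. The prompt, sampling parameters,
# and function name are illustrative, not the app's actual generation logic.
def _example_completion(prompt: str) -> str:
    output = llm.create_chat_completion(
        messages=[{"role": "user", "content": prompt}],
        max_tokens=256,
        temperature=0.7,
    )
    return output["choices"][0]["message"]["content"]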
# Use a CLIP model (OpenCLIP) to embed images and text into a shared space
client = chromadb.PersistentClient(path="DB")
embedding_function = OpenCLIPEmbeddingFunction()
image_loader = ImageLoader()  # required when adding/reading images via URIs

# Initialize separate collections for image and text data;
# get_or_create avoids an error when the persistent DB already contains them
collection_images = client.get_or_create_collection(
    name='collection_images',
    embedding_function=embedding_function,
    data_loader=image_loader,
)
collection_text = client.get_or_create_collection(
    name='collection_text',
    embedding_function=embedding_function,
)
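# Hedged sketch: how documents could be added to the text collection; the
# ids and documents below are placeholders, not the app's real data.
def _example_add_texts():
    collection_text.add(
        ids=["doc_0", "doc_1"],
        documents=[
            "Moin! The Bremen Town Musicians statue stands beside the town hall.",
            "The Schnoor is Bremen's oldest quarter, with lanes from the 15th century.",
        ],
    )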
# Collect the URIs of the images
IMAGE_FOLDER = 'images'
image_uris = sorted(
    os.path.join(IMAGE_FOLDER, image_name)
    for image_name in os.listdir(IMAGE_FOLDER)
    if not image_name.endswith('.txt')
)
ids = [str(i) for i in range(len(image_uris))]
# Add images by URI; the ImageLoader reads them and OpenCLIP embeds them
collection_images.add(ids=ids, uris=image_uris)
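# Hedged sketch: with OpenCLIP embeddings, the image collection can be
# queried with free text; the query string and result handling here are
# illustrative only.
def _example_image_query(query: str, n: int = 2):
    return collection_images.query(
        query_texts=[query],
        n_results=n,
        include=["uris", "distances"],
    )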