import gradio as gr
import spaces  # required for the @spaces.GPU decorator used below (ZeroGPU Spaces)

import os
import gc
import random
import warnings

warnings.filterwarnings("ignore")

import numpy as np
import pandas as pd

pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.set_option("display.width", 1000)
from tqdm.auto import tqdm

import torch
import torch.nn as nn
import tokenizers
import transformers

print(f"tokenizers.__version__: {tokenizers.__version__}")
print(f"transformers.__version__: {transformers.__version__}")
print(f"torch.__version__: {torch.__version__}")
print(f"torch cuda version: {torch.version.cuda}")
from transformers import AutoTokenizer, AutoConfig
from transformers import BitsAndBytesConfig, AutoModelForCausalLM, MistralForCausalLM
from peft import LoraConfig, get_peft_model
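
# Hedged sketch (defined but never called here): a typical way the imports above are
# combined to load a 4-bit quantized causal LM with a LoRA adapter. The model name and
# LoRA hyperparameters below are placeholder assumptions, not the checkpoint this Space
# actually serves.
def load_lora_model(model_name="mistralai/Mistral-7B-v0.1"):
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.float16,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        device_map="auto",
    )
    lora_config = LoraConfig(
        r=16,
        lora_alpha=32,
        target_modules=["q_proj", "v_proj"],
        lora_dropout=0.05,
        task_type="CAUSAL_LM",
    )
    model = get_peft_model(model, lora_config)
    return tokenizer, model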


title = "H2O AI Predict the LLM"

zero = torch.Tensor([0]).cuda()
print(zero.device) # <-- 'cpu' 🤔 (outside a @spaces.GPU call, ZeroGPU reports the tensor on CPU)

@spaces.GPU  # ZeroGPU: a GPU is attached only while this function is running
def greet(n):
    print(zero.device) # <-- 'cuda:0' 🤗
    return f"Hello {zero + n} Tensor"

gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text(), title=title).launch()
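
# Note (assumption, based on the ZeroGPU `spaces` package): the decorator also accepts a
# duration hint, in seconds, for handlers that need the GPU attached longer than the default:
#
# @spaces.GPU(duration=120)
# def generate(prompt):
#     ...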