Guna0pro committed
Commit db6f86c • 1 Parent(s): 2afec8e

Update app.py

Files changed (1):
  1. app.py +12 -15
app.py CHANGED
@@ -1,16 +1,16 @@
- # !pip -q install git+https://github.com/huggingface/transformers # need to install from github
- !pip install -q datasets loralib sentencepiece
- !pip -q install bitsandbytes accelerate xformers
- !pip -q install langchain
- !pip -q install gradio
+ # # !pip -q install git+https://github.com/huggingface/transformers # need to install from github
+ # !pip install -q datasets loralib sentencepiece
+ # !pip -q install bitsandbytes accelerate xformers
+ # !pip -q install langchain
+ # !pip -q install gradio
 
- !pip -q install peft chromadb
- !pip -q install unstructured
- !pip install -q sentence_transformers
- !pip -q install pypdf
+ # !pip -q install peft chromadb
+ # !pip -q install unstructured
+ # !pip install -q sentence_transformers
+ # !pip -q install pypdf
 
- from google.colab import drive
- drive.mount('/content/drive')
+ # from google.colab import drive
+ # drive.mount('/content/drive')
 
  """## LLaMA2 7B Chat
 
@@ -27,9 +27,6 @@ bnb_config = BitsAndBytesConfig(load_in_4bit=True,
 
  model_id = "meta-llama/Llama-2-7b-chat-hf"
 
- #daryl149/llama-2-7b-chat-hf
- #meta-llama/Llama-2-7b-chat-hf
-
  tokenizer = AutoTokenizer.from_pretrained(model_id,token='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')
  model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config = bnb_config,device_map={"":0},token='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')
 
@@ -55,7 +52,7 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
  from langchain.vectorstores import Chroma
  from langchain.document_loaders import PyPDFLoader
 
- loader = PyPDFLoader("/content/drive/MyDrive/Gen AI and LLM/data/data.pdf")
+ loader = PyPDFLoader("/data/data.pdf")
 
  text_splitter = RecursiveCharacterTextSplitter(
  # Set a really small chunk size, just to show.
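
For reference, a minimal sketch of how the pieces this commit touches fit together once the Colab-only setup is commented out: a 4-bit quantized load of the LLaMA2 7B Chat model, followed by PDF ingestion from the new repo-local path into a Chroma store. This is not the actual app.py; it assumes the listed packages are already installed in the Space, reads the access token from an HF_TOKEN environment variable instead of the hard-coded string in the diff, and the embedding model name and chunk sizes are placeholders not taken from the commit.

import os

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

model_id = "meta-llama/Llama-2-7b-chat-hf"
hf_token = os.environ.get("HF_TOKEN")  # assumed: token supplied via env var, not hard-coded

# 4-bit quantized load, as in the diff's context lines (the full
# BitsAndBytesConfig in app.py may set more options than shown here).
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map={"": 0},
    token=hf_token,
)

# PDF ingestion with the new repo-local path: load the pages, split them into
# small chunks, and index the chunks in a Chroma vector store.
loader = PyPDFLoader("/data/data.pdf")
pages = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
chunks = text_splitter.split_documents(pages)
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectordb = Chroma.from_documents(chunks, embeddings)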