theekshana commited on
Commit
dfb1f95
1 Parent(s): d707386

anyscale LLAMA2

Browse files
.env CHANGED
@@ -12,6 +12,7 @@ TARGET_SOURCE_CHUNKS=4
12
  #API token keys
13
  HUGGINGFACEHUB_API_TOKEN=<REDACTED — secret committed to VCS; revoke and rotate this HuggingFace token, then load it from an untracked env file>
14
  OPENAI_API_KEY=<REDACTED — secret committed to VCS; revoke and rotate this OpenAI key, then load it from an untracked env file>
 
15
 
16
  #api app
17
  APP_HOST=127.0.0.1
 
12
  #API token keys
13
  HUGGINGFACEHUB_API_TOKEN=<REDACTED — same leaked HuggingFace token as above; rotate it>
14
  OPENAI_API_KEY=<REDACTED — same leaked OpenAI key as above; rotate it>
15
+ ANYSCALE_ENDPOINT_TOKEN=<REDACTED — this commit adds a plaintext Anyscale secret to version control; revoke and rotate it, and add .env to .gitignore>
16
 
17
  #api app
18
  APP_HOST=127.0.0.1
__pycache__/config.cpython-311.pyc CHANGED
Binary files a/__pycache__/config.cpython-311.pyc and b/__pycache__/config.cpython-311.pyc differ
 
__pycache__/qaPipeline.cpython-311.pyc CHANGED
Binary files a/__pycache__/qaPipeline.cpython-311.pyc and b/__pycache__/qaPipeline.cpython-311.pyc differ
 
__pycache__/test 07 anyscale endpoint.cpython-311.pyc ADDED
Binary file (236 Bytes). View file
 
config.py CHANGED
@@ -1,11 +1,11 @@
1
  MODELS={
2
  "DEFAULT":"tiiuae/falcon-7b-instruct",
3
  # "gpt4all":"gpt4all",
4
- "flan-t5-xxl":"google/flan-t5-xxl",
5
  "falcon-7b-instruct":"tiiuae/falcon-7b-instruct",
 
 
6
  "openai gpt-3.5":"openai",
7
- # "Deci/DeciLM-6b-instruct":"Deci/DeciLM-6b-instruct",
8
- # "Deci/DeciLM-6b":"Deci/DeciLM-6b",
9
 
10
  }
11
 
 
1
  MODELS={
2
  "DEFAULT":"tiiuae/falcon-7b-instruct",
3
  # "gpt4all":"gpt4all",
4
+ # "flan-t5-xxl":"google/flan-t5-xxl",
5
  "falcon-7b-instruct":"tiiuae/falcon-7b-instruct",
6
+ "anyscale/Llama-2-13b":"anyscale/Llama-2-13b-chat-hf",
7
+ "anyscale/Llama-2-70b":"anyscale/Llama-2-70b-chat-hf",
8
  "openai gpt-3.5":"openai",
 
 
9
 
10
  }
11
 
faiss_index_with_year/tst DELETED
File without changes
faiss_index_with_year_2000_chunk/tst DELETED
File without changes
qaPipeline.py CHANGED
@@ -16,6 +16,7 @@ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
16
  from langchain.llms import GPT4All
17
  from langchain.llms import HuggingFaceHub
18
  from langchain.chat_models import ChatOpenAI
 
19
 
20
  # from langchain.retrievers.self_query.base import SelfQueryRetriever
21
  # from langchain.chains.query_constructor.base import AttributeInfo
@@ -39,6 +40,7 @@ model_n_batch = int(os.environ.get('MODEL_N_BATCH',8))
39
  target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS',4))
40
 
41
  openai_api_key = os.environ.get('OPENAI_API_KEY')
 
42
 
43
  verbose = os.environ.get('VERBOSE')
44
 
@@ -152,6 +154,10 @@ class QAPipeline:
152
  self.llm = ChatOpenAI(model_name="Deci/DeciLM-6b-instruct", temperature=0)
153
  case "Deci/DeciLM-6b":
154
  self.llm = ChatOpenAI(model_name="Deci/DeciLM-6b", temperature=0)
 
 
 
 
155
  case _default:
156
  # raise exception if model_type is not supported
157
  raise Exception(f"Model type {model_type} is not supported. Please choose a valid one")
 
16
  from langchain.llms import GPT4All
17
  from langchain.llms import HuggingFaceHub
18
  from langchain.chat_models import ChatOpenAI
19
+ from langchain.chat_models import ChatAnyscale
20
 
21
  # from langchain.retrievers.self_query.base import SelfQueryRetriever
22
  # from langchain.chains.query_constructor.base import AttributeInfo
 
40
  target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS',4))
41
 
42
  openai_api_key = os.environ.get('OPENAI_API_KEY')
43
+ anyscale_api_key = os.environ.get('ANYSCALE_ENDPOINT_TOKEN')
44
 
45
  verbose = os.environ.get('VERBOSE')
46
 
 
154
  self.llm = ChatOpenAI(model_name="Deci/DeciLM-6b-instruct", temperature=0)
155
  case "Deci/DeciLM-6b":
156
  self.llm = ChatOpenAI(model_name="Deci/DeciLM-6b", temperature=0)
157
+ case "anyscale/Llama-2-13b-chat-hf":
158
+ self.llm = ChatAnyscale(anyscale_api_key=anyscale_api_key,temperature=0, model_name='meta-llama/Llama-2-13b-chat-hf', streaming=False)
159
+ case "anyscale/Llama-2-70b-chat-hf":
160
+ self.llm = ChatAnyscale(anyscale_api_key=anyscale_api_key,temperature=0, model_name='meta-llama/Llama-2-70b-chat-hf', streaming=False)
161
  case _default:
162
  # raise exception if model_type is not supported
163
  raise Exception(f"Model type {model_type} is not supported. Please choose a valid one")