pseudotensor committed
Commit 5a3fd3e
Parent: dcd2001

Update with h2oGPT hash cbc3104dd3c53bc549c8d9c8e9d1d4b4b33ece8c

Files changed (4)
  1. client_test.py +1 -1
  2. gen.py +1 -1
  3. gpt_langchain.py +2 -2
  4. gradio_runner.py +1 -1
client_test.py CHANGED
@@ -100,7 +100,7 @@ def get_args(prompt, prompt_type, chat=False, stream_output=False,
                   chunk_size=512,
                   document_choice=[DocumentChoices.All_Relevant.name],
                   )
-    from src.gen import eval_func_param_names
+    from gen import eval_func_param_names
     assert len(set(eval_func_param_names).difference(set(list(kwargs.keys())))) == 0
     if chat:
         # add chatbot output on end. Assumes serialize=False
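All four files make the same mechanical change: imports that referenced the package path src.gen now reference the top-level module gen, so the code assumes gen.py sits directly on sys.path rather than inside a src package. As a hypothetical sketch (not part of this commit), an import that tolerates both layouts could fall back from one path to the other:

# Hypothetical fallback, not in the commit: try the packaged layout
# first, then the flat layout this commit switches to.
try:
    from src.gen import eval_func_param_names  # src/ package checkout
except ImportError:
    from gen import eval_func_param_names  # flat layout, gen.py on sys.path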
gen.py CHANGED
@@ -243,7 +243,7 @@ def main(
     Also set --share=False to avoid sharing a gradio live link.
     :param chat: whether to enable chat mode with chat history
     :param chat_context: whether to use extra helpful context if human_bot
-    :param stream_output: whether to stream output from src.gen
+    :param stream_output: whether to stream output
     :param show_examples: whether to show clickable examples in gradio
     :param verbose: whether to show verbose prints
     :param h2ocolors: whether to use H2O.ai theme
gpt_langchain.py CHANGED
@@ -25,7 +25,7 @@ from tqdm import tqdm
 
 from enums import DocumentChoices, no_lora_str, model_token_mapping, source_prefix, source_postfix, non_query_commands, \
     LangChainAction, LangChainMode
-from src.gen import gen_hyper, get_model, SEED
+from gen import gen_hyper, get_model, SEED
 from prompter import non_hf_types, PromptType, Prompter
 from utils import wrapped_partial, EThread, import_matplotlib, sanitize_filename, makedirs, get_url, flatten_list, \
     get_device, ProgressParallel, remove, hash_file, clear_torch_cache, NullContext, get_hf_server, FakeTokenizer
@@ -749,7 +749,7 @@ def get_llm(use_openai_model=False,
 
     if stream_output:
         skip_prompt = False
-        from src.gen import H2OTextIteratorStreamer
+        from gen import H2OTextIteratorStreamer
         decoder_kwargs = {}
         streamer = H2OTextIteratorStreamer(tokenizer, skip_prompt=skip_prompt, block=False, **decoder_kwargs)
         gen_kwargs.update(dict(streamer=streamer))
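For context on the second gpt_langchain.py hunk: H2OTextIteratorStreamer is h2oGPT's variant of Hugging Face's TextIteratorStreamer, which yields decoded text while generate() is still producing tokens. A minimal sketch of the stock transformers pattern, using a placeholder model (the block=False argument is specific to the H2O subclass and omitted here):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Placeholder model for illustration; h2oGPT wires in its own model and tokenizer.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=False)
gen_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=20)

# generate() blocks until finished, so run it in a worker thread and
# consume the streamed text incrementally on the main thread.
thread = Thread(target=model.generate, kwargs=gen_kwargs)
thread.start()
for text_chunk in streamer:
    print(text_chunk, end="", flush=True)
thread.join()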
gradio_runner.py CHANGED
@@ -57,7 +57,7 @@ from prompter import prompt_type_to_model_name, prompt_types_strings, inv_prompt
     get_prompt
 from utils import get_githash, flatten_list, zip_data, s3up, clear_torch_cache, get_torch_allocated, system_info_print, \
     ping, get_short_name, get_url, makedirs, get_kwargs, remove, system_info, ping_gpu
-from src.gen import get_model, languages_covered, evaluate, eval_func_param_names, score_qa, langchain_modes, \
+from gen import get_model, languages_covered, evaluate, eval_func_param_names, score_qa, langchain_modes, \
     inputs_kwargs_list, scratch_base_dir, no_default_param_names, \
     eval_func_param_names_defaults, get_max_max_new_tokens, get_minmax_top_k_docs, history_to_context, langchain_actions
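One consequence of the flat imports is that the directory holding gen.py must itself be importable. A hypothetical sketch of the path setup a caller outside that directory would need, assuming the modules live under a src/ directory in the repo (the directory name is an assumption, not stated in this commit):

import os
import sys

# Hypothetical path setup, not in the commit: put the directory that
# contains gen.py on sys.path so 'from gen import ...' resolves.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
from gen import get_model  # noqa: E402  (import after path manipulation)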