monra committed on
Commit
83290ff
1 Parent(s): 118c42b

Synced repo using 'sync_with_huggingface' GitHub Action

Browse files
client/html/index.html CHANGED
@@ -117,6 +117,8 @@
117
  <option value="gpt-4-32k">GPT-4-32k</option>
118
  </optgroup>
119
  <optgroup label="LLAMA">
 
 
120
  <option value="llama-2-70b-chat">llama-2-70b-chat</option>
121
  </optgroup>
122
  <optgroup label="{{_('IMAGE')}}">
 
117
  <option value="gpt-4-32k">GPT-4-32k</option>
118
  </optgroup>
119
  <optgroup label="LLAMA">
120
+ <option value="llama-2-7b-chat">llama-2-7b-chat</option>
121
+ <option value="llama-2-13b-chat">llama-2-13b-chat</option>
122
  <option value="llama-2-70b-chat">llama-2-70b-chat</option>
123
  </optgroup>
124
  <optgroup label="{{_('IMAGE')}}">
g4f/Provider/Providers/Chimera.py CHANGED
@@ -17,6 +17,8 @@ model = [
17
  'gpt-4',
18
  'gpt-4-0314',
19
  'gpt-4-32k',
 
 
20
  'llama-2-70b-chat',
21
  ]
22
  supports_stream = True
 
17
  'gpt-4',
18
  'gpt-4-0314',
19
  'gpt-4-32k',
20
+ 'llama-2-7b-chat',
21
+ 'llama-2-13b-chat',
22
  'llama-2-70b-chat',
23
  ]
24
  supports_stream = True
g4f/active_providers.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import uuid
2
+ import g4f
3
+ from g4f import ChatCompletion
4
+
5
# Prompt sent to every candidate provider to probe whether it works.
TEST_PROMPT = "Generate a sentence with 'ocean'"
# A reply counts as valid when it contains this substring (checked case-insensitively).
EXPECTED_RESPONSE_CONTAINS = "ocean"
7
+
8
+
9
class Provider:
    """A provider paired with the list of model names it serves."""

    def __init__(self, name, models):
        """Store the provider name and normalize *models* to a list.

        A single model name (non-list) is wrapped in a one-element list so
        callers can always iterate ``self.models``.
        """
        self.name = name
        if isinstance(models, list):
            self.models = models
        else:
            self.models = [models]

    def __str__(self):
        """The provider's string form is simply its name."""
        return self.name
19
+
20
+
21
class ModelProviderManager:
    """Tracks, per model, which providers have been verified to work."""

    def __init__(self):
        """Start with no verified providers."""
        # Maps model name -> list of provider names that answered correctly.
        self._working_model_providers = {}

    def add_provider(self, model, provider_name):
        """Record *provider_name* as a working provider for *model*.

        Providers are kept in insertion order; duplicates are not filtered,
        matching the original behavior.
        """
        # setdefault replaces the original explicit membership check +
        # manual empty-list initialization with one idiomatic call.
        self._working_model_providers.setdefault(model, []).append(provider_name)

    def get_working_providers(self):
        """Return the mapping of model name -> list of working provider names."""
        return self._working_model_providers
41
+
42
+
43
def _fetch_providers_having_models():
    """
    Collect a Provider wrapper for every g4f provider that exposes models
    and requires no authentication.
    """
    # Pair each attribute name on g4f.Provider with the object it refers to,
    # then keep only the applicable ones.
    candidates = (
        (attr_name, getattr(g4f.Provider, attr_name))
        for attr_name in dir(g4f.Provider)
    )
    return [
        Provider(attr_name, candidate.model)
        for attr_name, candidate in candidates
        if _is_provider_applicable(candidate)
    ]
56
+
57
+
58
+ def _is_provider_applicable(provider):
59
+ """
60
+ Check if the provider has a model and doesn't require authentication.
61
+ """
62
+ return (hasattr(provider, 'model') and
63
+ hasattr(provider, '_create_completion') and
64
+ hasattr(provider, 'needs_auth') and
65
+ not provider.needs_auth)
66
+
67
+
68
def _generate_test_messages():
    """Build the two-message conversation used to probe a provider."""
    system_message = {"role": "system", "content": "You are a trained AI assistant."}
    user_message = {"role": "user", "content": TEST_PROMPT}
    return [system_message, user_message]
74
+
75
+
76
def _manage_chat_completion(manager, model_providers, test_messages):
    """
    Probe every (provider, model) pair with the test messages.

    On a response containing the expected substring, print a success line and
    register the provider with *manager*; otherwise (or on any exception from
    the request) print an error line. Failures never abort the scan.
    """
    for provider in model_providers:
        for model in provider.models:
            try:
                response = _generate_chat_response(
                    provider.name, model, test_messages)
                if EXPECTED_RESPONSE_CONTAINS in response.lower():
                    _print_success_response(provider, model)
                    manager.add_provider(model, provider.name)
                else:
                    # Report the mismatch directly instead of raising an
                    # Exception purely as control flow to reach the handler
                    # below. Printed output is unchanged, since
                    # str(Exception(msg)) == msg.
                    _print_error_response(
                        provider, model, f"Unexpected response: {response}")
            except Exception as error:
                # Broad catch is intentional: any provider failure (network,
                # bad response object, etc.) is logged and the scan continues.
                _print_error_response(provider, model, error)
92
+
93
+
94
def _generate_chat_response(provider_name, model, test_messages):
    """
    Ask *model*, via the g4f provider named *provider_name*, to complete
    *test_messages*; return the completion result.
    """
    provider = getattr(g4f.Provider, provider_name)
    chat_id = str(uuid.uuid4())
    return ChatCompletion.create(
        model=model,
        messages=test_messages,
        chatId=chat_id,
        provider=provider,
    )
104
+
105
+
106
+ def _print_success_response(provider, model):
107
+ print(f"\u2705 [{provider}] - [{model}]: Success")
108
+
109
+
110
+ def _print_error_response(provider, model, error):
111
+ print(f"\u26D4 [{provider}] - [{model}]: Error - {str(error)}")
112
+
113
+
114
def get_active_model_providers():
    """
    Probe every candidate provider and report which ones currently work.

    Returns a dict mapping model name -> list of provider names that
    responded correctly to the test prompt.
    """
    manager = ModelProviderManager()
    candidate_providers = _fetch_providers_having_models()
    probe_messages = _generate_test_messages()
    _manage_chat_completion(manager, candidate_providers, probe_messages)
    return manager.get_working_providers()
get_working_providers.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
from g4f.active_providers import get_active_model_providers

# Probe all providers once, then report, per model, which ones responded.
working_providers = get_active_model_providers()

print("\nWorking providers by model:")
for model_name, provider_names in working_providers.items():
    joined_providers = ", ".join(provider_names)
    print(f"{model_name}: {joined_providers}")