DevsDoCode committed on
Commit
87729e7
1 Parent(s): 13d0476

Uploading Files

Browse files
Files changed (4) hide show
  1. api_info.py +87 -0
  2. app.py +42 -0
  3. deepinfra.py +74 -0
  4. requirements.txt +4 -0
api_info.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Static metadata about the API author; served as JSON by the
# /developer_info route in app.py and attached to /generate responses.
developer_info = {
    'developer': 'Devs Do Code',
    'contact': {
        'Telegram': 'https://t.me/devsdocode',
        'YouTube Channel': 'https://www.youtube.com/@DevsDoCode',
        'LinkedIn': 'https://www.linkedin.com/in/developer-sreejan/',
        'Discord Server': 'https://discord.gg/ehwfVtsAts',
        'Instagram': {
            'Personal': 'https://www.instagram.com/sree.shades_/',
            'Channel': 'https://www.instagram.com/devsdocode_/'
        }
    }
}
14
+
15
+
16
# Self-description of the main /generate endpoint; served as JSON by the
# /endpoints route in app.py.
endpoint = {
    'route': "/generate",
    # Required query-string parameters.
    'params': {
        "query": "[SEARCH QUERY]"
    },
    # Optional query-string parameters (defaults are applied in app.generate).
    'optional_params': {
        "max_tokens": "[]",
        "model": "[]",
        "temperature": "[]",
        "system_prompt": "[]"
    },
    # Example request. Fixed: query-string parameters are separated by a
    # single '&' — the original demo used '&&', which injects empty pairs.
    'url_demo': '/generate?query=Who is Devs Do Code&max_tokens=500&model=meta-llama/Meta-Llama-3-70B-Instruct&temperature=0.7&system_prompt=Your Owner is "Devs Do Code"'
}
29
+
30
# Human-friendly display name -> DeepInfra model identifier.
# Served as JSON by the /available_models route; the identifier (the value)
# is what the /generate endpoint's `model` parameter expects.
available_models = {
    "Meta Llama 3 70B Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
    "Meta Llama 3 8B Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
    "Mixtral 8x22B Instruct v0.1": "mistralai/Mixtral-8x22B-Instruct-v0.1",
    "Mixtral 8x22B v0.1": "mistralai/Mixtral-8x22B-v0.1",
    "Microsoft WizardLM 2 8x22B": "microsoft/WizardLM-2-8x22B",
    "Microsoft WizardLM 2 7B": "microsoft/WizardLM-2-7B",
    "HuggingFace Zephyr Orpo 141b A35b v0.1": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
    "Google Gemma 1.1 7b it": "google/gemma-1.1-7b-it",
    "Databricks DBRX Instruct": "databricks/dbrx-instruct",
    "Mixtral 8x7B Instruct v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Mistral 7B Instruct v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
    "Meta Llama 2 70b Chat HF": "meta-llama/Llama-2-70b-chat-hf",
    "Cognitive Computations Dolphin 2.6 Mixtral 8x7B": "cognitivecomputations/dolphin-2.6-mixtral-8x7b"
}
45
+
46
# Generic error payload returned with HTTP 400 by /generate when the
# required `query` parameter is missing.
error_message = {
    'developer_contact': {
        'Telegram': 'https://t.me/DevsDoCode',
        'Instagram': 'https://www.instagram.com/sree.shades_/',
        'Discord': 'https://discord.gg/ehwfVtsAts',
        'LinkedIn': 'https://www.linkedin.com/in/developer-sreejan/',
        'Twitter': 'https://twitter.com/Anand_Sreejan'
    },
    'error': 'Oops! Something went wrong. Please contact the developer Devs Do Code.'
}
56
+
57
+
58
# Plain-text "about" blurb served (wrapped in <pre>) by the root route in
# app.py. NOTE(review): the exact indentation of the Instagram sub-bullets
# was lost in extraction — verify against the original file.
default_info = """This API is developed and being maintained by Devs Do Code (Sreejan).

**About the Developer**

Sreejan, a high school student from Patna, Bihar, India, has emerged as a notable figure in the technology sector.
His creation of an API is a testament to his dedication and expertise. Despite his youth, Sreejan's contributions
to artificial intelligence and machine learning are significant. As an AI & ML Engineer, he specializes in Deep Learning,
Natural Language Processing (NLP), and Robotics, with proficiency in Python, Java, and Mobile App Development.
Beyond his role as a technology consumer, Sreejan is an active open-source contributor, notably to projects like Hugging Face.

He is also recognized for his role in community development, particularly through "Devs Do Code," a platform he
founded to provide quality coding resources, tutorials, and projects. His mission is to equip developers with the
necessary skills to thrive in the ever-evolving tech landscape. Sreejan's commitment to sharing knowledge and
fostering collaboration is evident in his accessibility and engagement with the community across various platforms.

Connect with Sreejan and follow his journey in technology and innovation:

- Telegram: https://t.me/devsdocode
- YouTube Channel: https://www.youtube.com/@DevsDoCode
- LinkedIn: https://www.linkedin.com/in/developer-sreejan/
- Discord Server: https://discord.gg/ehwfVtsAts
- Instagram
    - Personal: https://www.instagram.com/sree.shades_/
    - Channel: https://www.instagram.com/devsdocode_/

Sreejan stands out not only as a developer but as a visionary and leader, driving change in the tech industry
with his passion, expertise, and unwavering commitment to community building. He continues to shape the
future of technology, one line of code at a time.
"""
87
+
app.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, request, jsonify
2
+ import deepinfra
3
+ import api_info
4
+
5
# WSGI application object; all routes below register against it.
app = Flask(__name__)
6
+
7
@app.route('/')
def initial():
    """Landing page: the plain-text about blurb, rendered preformatted."""
    about_text = api_info.default_info
    return f"<pre>{about_text}</pre>"
10
+
11
@app.route("/available_models", methods=['GET'])
def available_models():
    """Serve the display-name -> model-id mapping as JSON."""
    models = api_info.available_models
    return jsonify(models)
14
+
15
@app.route("/endpoints", methods=['GET'])
def endpoints():
    """Serve the /generate endpoint description as JSON."""
    description = api_info.endpoint
    return jsonify(description)
18
+
19
@app.route("/developer_info", methods=['GET'])
def developer_info():
    """Serve the developer contact metadata as JSON."""
    info = api_info.developer_info
    return jsonify(info)
22
+
23
@app.route('/generate', methods=['GET'])
def generate():
    """Generate an LLM completion for the `query` query-string parameter.

    Optional parameters: `system_prompt` (legacy alias `system`), `model`,
    `max_tokens`, `temperature`. Responds 400 when `query` is missing or a
    numeric parameter is malformed; 200 with the model response otherwise.
    """
    query = request.args.get('query')
    # BUG FIX: the documented parameter name (api_info.endpoint /
    # url_demo) is 'system_prompt', but this handler only read 'system'.
    # Accept 'system_prompt' first and keep 'system' for backward compat.
    system_prompt = str(request.args.get(
        'system_prompt',
        request.args.get('system', "Be Helpful and Friendly. Keep your response straightfoward, short and concise"),
    ))
    model = str(request.args.get('model', "meta-llama/Meta-Llama-3-70B-Instruct"))

    # Malformed numeric input is a client error (400), not a server crash.
    try:
        max_tokens = int(request.args.get('max_tokens', 512))
        temperature = float(request.args.get('temperature', 0.7))
    except ValueError:
        return jsonify(api_info.error_message), 400

    if not query:
        return jsonify(api_info.error_message), 400

    response = deepinfra.generate(
        query,
        model=model,
        system_prompt=system_prompt,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return jsonify([{'response': response}, {'developer_info': api_info.developer_info}]), 200
38
+
39
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger
    # (arbitrary code execution) and auto-reload — must not be used in any
    # publicly reachable deployment.
    app.run(debug=True)
41
+
42
+
deepinfra.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import requests
3
+ from typing import Union
4
+
5
def generate(message: str, model: str = 'meta-llama/Meta-Llama-3-70B-Instruct', system_prompt: str = "Be Helpful and Friendly. Keep your response straightfoward, short and concise", max_tokens: int = 512, temperature: float = 0.7, timeout: float = 60.0) -> Union[str, None]:
    """Send one chat-completion request to DeepInfra's OpenAI-compatible API.

    Parameters:
    - message (str): The user message to send to the model.
    - model (str): Model identifier, e.g. "meta-llama/Meta-Llama-3-70B-Instruct".
      See `available_models` in api_info.py for the known options.
    - system_prompt (str): System message steering the model's behaviour.
      (Docstring fix: the previous doc claimed the default was
      "Talk Like Shakespeare", which did not match the actual default.)
    - max_tokens (int): Maximum number of tokens to generate.
    - temperature (float): Sampling temperature.
    - timeout (float): New, backward-compatible: request timeout in seconds,
      so a stalled connection cannot hang the caller forever.

    Returns:
    - Union[str, None]: The assistant's reply text, or None on any failure
      (network error, non-2xx status, or unexpected response shape).
    """
    url = "https://api.deepinfra.com/v1/openai/chat/completions"

    payload = {
        'model': model,
        'messages': [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": message},
        ],
        'temperature': temperature,
        'max_tokens': max_tokens,
        'stop': [],
        'stream': False,
    }

    try:
        # BUG FIX: use `json=` so requests serializes the payload AND sends
        # the Content-Type: application/json header — the original posted a
        # raw string via `data=` with no header. Also add a timeout.
        result = requests.post(url, json=payload, timeout=timeout)
        result.raise_for_status()
        return result.json()['choices'][0]['message']['content']
    except (requests.RequestException, KeyError, IndexError, ValueError) as e:
        # Deliberate best-effort contract: report and return None rather
        # than propagate, as in the original (narrowed from bare Exception).
        print("Error:", e)
        return None
49
+
50
+
51
+
52
if __name__ == "__main__":
    # Smoke-test every known model with one short prompt.
    model_names = [
        "meta-llama/Meta-Llama-3-70B-Instruct",
        "meta-llama/Meta-Llama-3-8B-Instruct",
        "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "mistralai/Mixtral-8x22B-v0.1",
        "microsoft/WizardLM-2-8x22B",
        "microsoft/WizardLM-2-7B",
        "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
        "google/gemma-1.1-7b-it",
        "databricks/dbrx-instruct",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "mistralai/Mistral-7B-Instruct-v0.2",
        "meta-llama/Llama-2-70b-chat-hf",
        "cognitivecomputations/dolphin-2.6-mixtral-8x7b"
    ]

    messages = "Introduce yourself and tell who made you and about your owner company"  # Add more messages as needed
    for name in model_names:
        # BUG FIX: the original called generate(name, messages, ...), which
        # bound the model id to `message` and the prompt to `model` —
        # arguments were swapped. Pass the prompt first and the model by
        # keyword.
        response = generate(messages, model=name, system_prompt="Respond very concisely and shortly")

        print(f"• Model: {name} -", response)
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# Pinned runtime dependencies.
# NOTE(review): app.py is a Flask (WSGI) application; fastapi and uvicorn
# (ASGI) are not imported by any file in this commit — confirm whether the
# deployment platform actually requires them before keeping the pins.
fastapi==0.110.2
Flask==3.0.3
Requests==2.31.0
uvicorn==0.29.0