ysharma HF staff committed on
Commit
ff60419
1 Parent(s): 015a9ef

create app.py

Browse files
Files changed (1) hide show
  1. app.py +119 -196
app.py CHANGED
@@ -5,211 +5,134 @@ import gradio as gr
5
  import requests
6
  from huggingface_hub import whoami, list_models
7
 
8
-
9
- def error_str(error, title="Error"):
10
- return f"""#### {title}
11
- {error}""" if error else ""
12
-
13
- def url_to_model_id(model_id_str):
14
- return model_id_str.split("/")[-2] + "/" + model_id_str.split("/")[-1] if model_id_str.startswith("https://huggingface.co/") else model_id_str
15
-
16
- def has_diffusion_model(model_id, token):
17
- api = HfApi(token=token)
18
- return any([f.endswith("diffusion_pytorch_model.bin") for f in api.list_repo_files(repo_id=model_id)])
19
-
20
- def get_my_model_names(token):
21
-
22
- try:
23
- author = whoami(token=token)
24
- model_infos = list_models(author=author["name"], use_auth_token=token)
25
-
26
-
27
- model_names = []
28
- for model_info in model_infos:
29
- model_id = model_info.modelId
30
- if has_diffusion_model(model_id, token):
31
- model_names.append(model_id)
32
-
33
- # if not model_names:
34
- # return [], Exception("No diffusion models found in your account.")
35
-
36
- return model_names, None
37
-
38
- except Exception as e:
39
- return [], e
40
-
41
- def on_token_change(token):
42
-
43
- if token:
44
- model_names, error = get_my_model_names(token)
45
- return gr.update(visible=not error), gr.update(choices=model_names, label="Select a model:"), error_str(error)
46
- else:
47
- return gr.update(visible=False), gr.update(choices=[], label="Select a model:"), None
48
-
49
- def on_load_model(user_model_id, other_model_id, token):
50
-
51
- if not user_model_id and not other_model_id:
52
- return None, None, None, None, gr.update(value=error_str("Please enter a model ID."))
53
-
54
- try:
55
- model_id = url_to_model_id(other_model_id) if other_model_id else user_model_id
56
- original_model_id = model_id
57
-
58
- if not has_diffusion_model(model_id, token):
59
- return None, None, None, None, gr.update(value=error_str("There are no diffusion weights in the model you selected."))
60
-
61
- user = whoami(token=token)
62
- model_id = user["name"] + "/" + model_id.split("/")[-1]
63
- title = " ".join([w.capitalize() for w in model_id.split("/")[-1].replace("-", " ").replace("_", " ").split(" ")])
64
-
65
- description = f"""Demo for <a href="https://huggingface.co/{original_model_id}">{title}</a> Stable Diffusion model."""
66
-
67
- return gr.update(visible=True), gr.update(value=model_id), gr.update(value=title), gr.update(value=description), None
68
-
69
- except Exception as e:
70
- return None, None, None, None, gr.update(value=error_str(e))
71
-
72
- def create_and_push(space_type, hardware, private_space, other_model_name, radio_model_names, model_id, title, description, prefix, update, token):
73
-
74
- try:
75
-
76
- # 1. Create the new space
77
- api = HfApi(token=token)
78
- repo_url = api.create_repo(
79
- repo_id=model_id,
80
- exist_ok=update,
81
- repo_type="space",
82
- space_sdk="gradio",
83
- private=private_space
84
  )
85
- api_url = f'https://huggingface.co/api/spaces/{model_id}'
86
- headers = { "Authorization" : f"Bearer {token}"}
87
- # add HUGGING_FACE_HUB_TOKEN secret to new space
88
- requests.post(f'{api_url}/secrets', json={"key":"HUGGING_FACE_HUB_TOKEN","value":token}, headers=headers)
89
- # set new Space Hardware flavor
90
- requests.post(f'{api_url}/hardware', json={'flavor': hardware}, headers=headers)
91
-
92
- # 2. Replace the name, title, and description in the template
93
- with open("template/app_simple.py" if space_type == "Simple" else "template/app_advanced.py", "r") as f:
94
- app = f.read()
95
- app = app.replace("$model_id", url_to_model_id(other_model_name) if other_model_name else radio_model_names)
96
- app = app.replace("$title", title)
97
- app = app.replace("$description", description)
98
- app = app.replace("$prefix", prefix)
99
- app = app.replace("$space_id", whoami(token=token)["name"] + "/" + model_id.split("/")[-1])
100
-
101
- # 3. save the new app.py file
102
- with open("app.py", "w") as f:
103
- f.write(app)
104
-
105
- # 4. Upload the new app.py to the space
106
- api.upload_file(
107
- path_or_fileobj="app.py",
108
- path_in_repo="app.py",
109
- repo_id=model_id,
110
- token=token,
111
- repo_type="space",
112
  )
113
-
114
- # 5. Upload template/requirements.txt to the space
115
- if space_type == "Advanced":
116
- api.upload_file(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
  path_or_fileobj="template/requirements.txt",
118
  path_in_repo="requirements.txt",
119
  repo_id=model_id,
120
  token=token,
121
- repo_type="space",
122
- )
123
-
124
- # 5. Delete the app.py file
125
- os.remove("app.py")
126
-
127
- return f"""Successfully created space at: <a href="{repo_url}" target="_blank">{repo_url}</a>"""
128
-
129
- except Exception as e:
130
- return error_str(e)
131
-
132
-
133
- DESCRIPTION = """### Create a gradio space for your Diffusers🧨 model
134
- With this space, you can easily create a gradio demo for your Diffusers model and share it with the community.<br>
135
- 1️⃣ Make sure you have created your hugging face account<br>
136
- 2️⃣ Generate a token here with write access<br>
137
- 3️⃣ Choose a stable diffusion base model, there are thousands of them here<br>
138
- 4️⃣ Choose Space type<br>
139
- 5️⃣ Choose the new Space Hardware<br>
140
- It is done.
141
- """
 
142
 
 
143
  with gr.Blocks() as demo:
144
-
145
- gr.Markdown(DESCRIPTION)
146
  with gr.Row():
 
 
 
 
 
 
 
 
 
 
147
 
148
- with gr.Column(scale=11):
149
- with gr.Column():
150
- gr.Markdown("#### 1. Choose a model")
151
- input_token = gr.Textbox(
152
- max_lines=1,
153
- type="password",
154
- label="Enter your Hugging Face token",
155
- placeholder="WRITE permission is required!",
156
- )
157
- gr.Markdown("You can get a token [here](https://huggingface.co/settings/tokens)")
158
- with gr.Group(visible=False) as group_model:
159
- radio_model_names = gr.Radio(label="Your models:")
160
- other_model_name = gr.Textbox(label="Other model:", placeholder="URL or model id, e.g. username/model_name")
161
- btn_load = gr.Button(value="Load model")
162
-
163
- with gr.Column(scale=10):
164
- with gr.Column(visible=False) as group_create:
165
- gr.Markdown("#### 2. Enter details and create the space")
166
- name = gr.Textbox(label="Name", placeholder="e.g. diffusers-demo")
167
- title = gr.Textbox(label="Title", placeholder="e.g. Diffusers Demo")
168
- description = gr.Textbox(label="Description", placeholder="e.g. Demo for my awesome Diffusers model", lines=5)
169
- prefix = gr.Textbox(label="Prefix tokens", placeholder="Tokens that are required to be present in the prompt, e.g. `rick and morty style`")
170
- gr.Markdown("""#### Choose space type
171
- - **Simple** - Runs on GPU using Hugging Face inference API, but you cannot control image generation parameters.
172
- - **Advanced** - Runs on CPU by default, with the option to upgrade to GPU. You can control image generation parameters: guidance, number of steps, image size, etc. Also supports **image-to-image** generation.""")
173
- space_type =gr.Radio(label="Space type", choices=["Simple", "Advanced"], value="Simple")
174
- update = gr.Checkbox(label="Update the space if it already exists?")
175
- private_space = gr.Checkbox(label="Private Space")
176
- gr.Markdown("Choose the new Space Hardware <small>[check pricing page](https://huggingface.co/pricing#spaces), you need payment method to upgrade your Space hardware</small>")
177
- hardware = gr.Dropdown(["cpu-basic","cpu-upgrade","t4-small","t4-medium","a10g-small","a10g-large"],value = "cpu-basic", label="Space Hardware")
178
- brn_create = gr.Button("Create the space")
179
-
180
- error_output = gr.Markdown(label="Output")
181
-
182
-
183
- input_token.change(
184
- fn=on_token_change,
185
- inputs=input_token,
186
- outputs=[group_model, radio_model_names, error_output],
187
- queue=False,
188
- scroll_to_output=True)
189
-
190
- btn_load.click(
191
- fn=on_load_model,
192
- inputs=[radio_model_names, other_model_name, input_token],
193
- outputs=[group_create, name, title, description, error_output],
194
- queue=False,
195
- scroll_to_output=True)
196
-
197
- brn_create.click(
198
- fn=create_and_push,
199
- inputs=[space_type, hardware, private_space, other_model_name, radio_model_names, name, title, description, prefix, update, input_token],
200
- outputs=[error_output],
201
- scroll_to_output=True
202
- )
203
-
204
- # gr.Markdown("""<img src="https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/imgs/diffusers_library.jpg" width="150"/>""")
205
- gr.HTML("""
206
- <div style="border-top: 1px solid #303030;">
207
- <br>
208
- <p>Space by: <a href="https://twitter.com/hahahahohohe"><img src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social" alt="Twitter Follow"></a></p><br>
209
- <a href="https://www.buymeacoffee.com/anzorq" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 45px !important;width: 162px !important;" ></a><br><br>
210
- <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.sd-space-creator" alt="visitors"></p>
211
- </div>
212
- """)
213
-
214
  demo.queue()
215
  demo.launch(debug=True)
 
5
  import requests
6
  from huggingface_hub import whoami, list_models
7
 
8
#Code for extracting the markdown files from a repo
#To get markdowns from github for any/your repo
def get_github_docs(repo_link):
    """Clone a GitHub repo into a temp dir and yield one Document per markdown file.

    Parameters:
        repo_link: GitHub repo URL or "owner/name" path; the last two path
            segments are taken as owner and repo name.

    Yields:
        Document objects whose page_content is the file text and whose
        metadata["source"] is a permalink pinned to the cloned commit SHA.
    """
    # rstrip('/') guards against a trailing slash producing an empty repo name
    parts = repo_link.rstrip('/').split('/')
    repo_owner, repo_name = parts[-2], parts[-1]

    with tempfile.TemporaryDirectory() as d:
        # Use an argument list (shell=False) so a crafted repo_link cannot
        # inject shell commands.
        subprocess.check_call(
            ["git", "clone", f"https://github.com/{repo_owner}/{repo_name}.git", "."],
            cwd=d,
        )
        # Pin source links to the exact commit that was cloned
        git_sha = (
            subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=d)
            .decode("utf-8")
            .strip()
        )
        repo_path = pathlib.Path(d)
        markdown_files = list(repo_path.rglob("*.md")) + list(
            repo_path.rglob("*.mdx")
        )
        for markdown_file in markdown_files:
            try:
                with open(markdown_file, "r") as f:
                    relative_path = markdown_file.relative_to(repo_path)
                    github_url = f"https://github.com/{repo_owner}/{repo_name}/blob/{git_sha}/{relative_path}"
                    yield Document(page_content=f.read(), metadata={"source": github_url})
            except FileNotFoundError:
                # Best-effort: skip unreadable files rather than abort the whole clone walk
                print(f"Could not open file: {markdown_file}")
36
+
37
#Code for creating a new space for the user
def create_space(repo_link, hf_token):
    """Create a public Gradio Space named LangChain_<repo>Bot under the token owner.

    Parameters:
        repo_link: GitHub repo URL/path; only the last segment (repo name) is used.
        hf_token: Hugging Face token with write permission.

    Returns:
        The URL of the created Space (previously computed but discarded).
    """
    print("***********INSIDE CREATE SPACE***************")
    repo_name = repo_link.rstrip('/').split('/')[-1]
    api = HfApi(token=hf_token)
    repo_url = api.create_repo(
        repo_id=f'LangChain_{repo_name}Bot',  #example - ysharma/LangChain_GradioBot
        repo_type="space",
        space_sdk="gradio",
        private=False)
    # Surface the URL to callers instead of dropping it (backward-compatible:
    # existing callers ignore the return value).
    return repo_url
47
+
48
#Code for creating the search index
#Saving search index to disk
def create_search_index(repo_link, openai_api_key):
    """Build a FAISS search index over a repo's markdown docs and save it to disk.

    Parameters:
        repo_link: GitHub repo URL/path, forwarded to get_github_docs.
        openai_api_key: key used to create the OpenAI embeddings.

    Returns:
        The filename of the pickled index, "search_index.pickle".
    """
    print("***********INSIDE CREATE SEARCH INDEX***************")
    #openai = OpenAI(temperature=0, openai_api_key=openai_api_key )
    docs = get_github_docs(repo_link)  #"gradio-app", "gradio"

    # Split every document into fixed-size chunks, keeping each source's metadata
    text_splitter = CharacterTextSplitter(separator=" ", chunk_size=1024, chunk_overlap=0)
    chunks = [
        Document(page_content=piece, metadata=doc.metadata)
        for doc in docs
        for piece in text_splitter.split_text(doc.page_content)
    ]

    index = FAISS.from_documents(chunks, OpenAIEmbeddings(openai_api_key=openai_api_key))

    # Persist the FAISS index via pickle so it can be uploaded to the new space.
    # NOTE(review): pickle files must only be loaded from trusted sources — confirm
    # the consuming app treats this file as trusted.
    with open("search_index.pickle", "wb") as f:
        pickle.dump(index, f)
    return "search_index.pickle"
66
+
67
def upload_files_to_space(repo_link, hf_token):
    """Fill in the app template and upload app.py, the search index, and
    requirements.txt to the LangChain_<repo>Bot Space, then clean up local files.

    Parameters:
        repo_link: GitHub repo URL/path; only the last segment (repo name) is used.
        hf_token: Hugging Face token with write permission.

    Returns:
        An HTML anchor string linking to the created chatbot Space.

    Fixes vs. the committed version: added the missing ':' after the def line,
    replaced the undefined names `reponame`, `api`, `model_id` and `token`,
    and restored the keyword-argument commas that were commented out
    (`#model_id,` swallowed the comma, making the calls SyntaxErrors).
    """
    print("***********INSIDE UPLOAD FILES TO SPACE***************")
    repo_name = repo_link.rstrip('/').split('/')[-1]
    repo_id = f'LangChain_{repo_name}Bot'
    # This scope never had an HfApi client — create one here
    api = HfApi(token=hf_token)

    #Replacing the repo name in app.py
    with open("template/app_og.py", "r") as f:
        app = f.read()
    app = app.replace("$RepoName", repo_name)
    #app = app.replace("$space_id", whoami(token=token)["name"] + "/" + model_id.split("/")[-1])

    #Saving the new app.py file to disk
    with open("template/app.py", "w") as f:
        f.write(app)

    #Uploading the new app.py to the new space
    api.upload_file(
        path_or_fileobj="template/app.py",
        path_in_repo="app.py",
        repo_id=repo_id,
        token=hf_token,
        repo_type="space",
    )
    #Uploading the new search_index file to the new space
    api.upload_file(
        path_or_fileobj="search_index.pickle",
        path_in_repo="search_index.pickle",
        repo_id=repo_id,
        token=hf_token,
        repo_type="space",
    )
    #Upload requirements.txt to the space
    api.upload_file(
        path_or_fileobj="template/requirements.txt",
        path_in_repo="requirements.txt",
        repo_id=repo_id,
        token=hf_token,
        repo_type="space",
    )
    #Deleting the files - search_index and app.py file
    os.remove("template/app.py")
    os.remove("search_index.pickle")

    user_name = whoami(token=hf_token)['name']
    repo_url = f"https://huggingface.co/spaces/{user_name}/{repo_id}"
    space_name = f"{user_name}/{repo_id}"
    # Quote the href attribute so the generated HTML is valid
    return f"Successfully created the Chatbot at: <a href='{repo_url}' target='_blank'>{space_name}</a>"
109
+
110
def driver(repo_link, hf_token):
    """Orchestrate chatbot creation: make the Space, then push the app files.

    Returns the HTML link to the created Space produced by upload_files_to_space.
    """
    #create search index openai_api_key=openai_api_key
    #search_index_pickle = create_search_index(repo_link, openai_api_key)
    print("***********INSIDE DRIVER***************")
    #Step 1: create a new (empty) gradio space for the chatbot
    create_space(repo_link, hf_token)
    #Step 2: upload app.py, search index and requirements to the new space
    result_html = upload_files_to_space(repo_link, hf_token)
    print(f"html tag is : {result_html}")
    return result_html
120
+
121
+
122
 
123
#Gradio code for Repo as input and search index as output file
# UI: one row of inputs (repo, HF token, OpenAI key), one row of action
# buttons, then an HTML output for the space link and a File output for
# downloading the generated FAISS index.
with gr.Blocks() as demo:
    with gr.Row():
        repo_link = gr.Textbox(label="Enter Github repo name")
        hf_token_in = gr.Textbox(type='password', label="Enter hf-token name")
        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
    with gr.Row():
        btn_faiss = gr.Button("Create Search index")
        # Fixed user-facing typo: "YOur" -> "Your"
        btn_create_space = gr.Button("Create Your Chatbot")
    html_out = gr.HTML()
    search_index_file = gr.File()
    btn_faiss.click(create_search_index, [repo_link, openai_api_key], search_index_file)
    # Restored the closing parenthesis that was missing in the committed version
    btn_create_space.click(driver, [repo_link, hf_token_in], html_out)

demo.queue()
demo.launch(debug=True)