Vipitis committed on
Commit
74b2bf0
1 Parent(s): 46e097d

slightly improved default values

Browse files
Files changed (1) hide show
  1. app.py +34 -9
app.py CHANGED
@@ -3,6 +3,7 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
3
  import datasets
4
  import asyncio
5
  import numpy as np
 
6
 
7
  def make_script(shader_code):
8
  # code copied and fixed(escaping single quotes to double quotes!!!) from https://webglfundamentals.org/webgl/webgl-shadertoy.html
@@ -272,6 +273,7 @@ outro_text ="""
272
  - [] support FIM task for better model context
273
  - [~] include some context for prompt (title, comments before a functions) - now works with the first comment inside a function body (has to be first)
274
  - [] gradio examples
 
275
 
276
  ### Notes:
277
  - this is meant as a resource to show code generation for a "creative" task.
@@ -281,6 +283,14 @@ outro_text ="""
281
  - If you create a remix with these tools, please attribute the original creator of your starting point when sharing the results. (And perhaps share in the [discussion tab](https://huggingface.co/Vipitis/santacoder-finetuned-the-stack-glsl/discussions?status=open&type=discussion) too)
282
  """
283
 
 
 
 
 
 
 
 
 
284
  passes_dataset = datasets.load_dataset("Vipitis/Shadertoys")
285
  single_passes = passes_dataset.filter(lambda x: not x["has_inputs"] and x["num_passes"] == 1) #could also include shaders with no extra functions.
286
  all_single_passes = datasets.concatenate_datasets([single_passes["train"], single_passes["test"]])
@@ -321,9 +331,13 @@ def _parse_functions(in_code):
321
  PIPE = None
322
 
323
  def _make_pipeline(model_cp = "Vipitis/santacoder-finetuned-Shadertoys-fine"): #bad default model for testing
 
 
 
 
324
  tokenizer = AutoTokenizer.from_pretrained(model_cp, trust_remote_code=True)
325
  model = AutoModelForCausalLM.from_pretrained(model_cp, trust_remote_code=True)
326
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, trust_remote_code=True)
327
  PIPE = pipe # set the global?
328
  print(f"loaded model {model_cp} as a pipline")
329
  return pipe
@@ -342,7 +356,7 @@ def get_full_replacement(orig_code, retn_start_idx, retn_end_idx, prediction) ->
342
  variation = orig_code[:retn_start_idx] + generated + orig_code[retn_end_idx:]
343
  return variation
344
 
345
- def alter_return(orig_code, func_idx="0:", pipeline=PIPE): #default pipeline can't be passed as gloabl?
346
  """
347
  Replaces the return statement of a function with a generated one.
348
  Args:
@@ -356,8 +370,13 @@ def alter_return(orig_code, func_idx="0:", pipeline=PIPE): #default pipeline can
356
  print("no pipeline found, loading default one")
357
  pipeline = _make_pipeline()
358
 
359
- print(f"{func_idx=}")
360
- func_idx = int(func_idx.split(":")[0].strip())
 
 
 
 
 
361
 
362
  retrns = []
363
  retrn_start_idx = orig_code.find("return")
@@ -391,7 +410,7 @@ def _line_chr2char(text, line_idx, chr_idx):
391
  return char_idx
392
 
393
 
394
- def alter_body(old_code, func_id: str, funcs_list: list, pipeline=PIPE):
395
  """
396
  Replaces the body of a function with a generated one.
397
  Args:
@@ -401,8 +420,13 @@ def alter_body(old_code, func_id: str, funcs_list: list, pipeline=PIPE):
401
  Returns:
402
  str: The altered code.
403
  """
404
- print(f"{func_id=}")
405
- func_id = int(func_id.split(":")[0].strip()) #undo their string casting?
 
 
 
 
 
406
  func_node = funcs_list[func_id]
407
  print(f"using for generation: {func_node=}")
408
 
@@ -468,7 +492,7 @@ with gr.Blocks() as site:
468
  top_md = gr.Markdown(intro_text)
469
  model_cp = gr.Textbox(value="Vipitis/santacoder-finetuned-Shadertoys-fine", label="Model Checkpoint (Enter to load!)", interactive=True)
470
  sample_idx = gr.Slider(minimum=0, maximum=num_samples, value=3211, label="pick sample from dataset", step=1.0)
471
- func_dropdown = gr.Dropdown(label="chose a function to modify") #breaks if I add a string in before that?
472
  with gr.Row():
473
  gen_return_button = gr.Button("generate a alternate return statement", label="generate return")
474
  gen_func_button = gr.Button("generate an alternate function body", label="generate function")
@@ -477,12 +501,13 @@ with gr.Blocks() as site:
477
  with gr.Column():
478
  source_embed = gr.HTML('<iframe width="640" height="360" frameborder="0" src="" allowfullscreen></iframe>', label="How this shader originally renders")
479
  our_embed = gr.HTML(label="glsl render of the current code")
480
- sample_code = gr.Code("// touch the slider to select a shader", label="Current Code (will update changes you generate)", language=None)
481
  bot_md = gr.Markdown(outro_text)
482
  sample_pass = gr.State(value={})
483
  pipe = gr.State(value=PIPE)
484
  pipe.value=_make_pipeline("Vipitis/santacoder-finetuned-Shadertoys-fine") # set a default like this?
485
  funcs = gr.State(value=[])
 
486
  # hist_state = gr.State(Value={})
487
  # history_table = gr.JSON()
488
 
 
3
  import datasets
4
  import asyncio
5
  import numpy as np
6
+ import torch
7
 
8
  def make_script(shader_code):
9
  # code copied and fixed(escaping single quotes to double quotes!!!) from https://webglfundamentals.org/webgl/webgl-shadertoy.html
 
273
  - [] support FIM task for better model context
274
  - [~] include some context for prompt (title, comments before a functions) - now works with the first comment inside a function body (has to be first)
275
  - [] gradio examples
276
+ - [] use GPU if available, respect memory restrictions.
277
 
278
  ### Notes:
279
  - this is meant as a resource to show code generation for a "creative" task.
 
283
  - If you create a remix with these tools, please attribute the original creator of your starting point when sharing the results. (And perhaps share in the [discussion tab](https://huggingface.co/Vipitis/santacoder-finetuned-the-stack-glsl/discussions?status=open&type=discussion) too)
284
  """
285
 
286
+ new_shadertoy_code = """void mainImage( out vec4 fragColor, in vec2 fragCoord )
287
+ {
288
+ // touch the slider to load a shader from the dataset or start coding from here.
289
+ vec2 uv = fragCoord/iResolution.xy;
290
+ vec3 col = 0.5 + 0.5*cos(iTime+uv.xyx+vec3(0,2,4));
291
+ fragColor = vec4(col,1.0);
292
+ }"""
293
+
294
  passes_dataset = datasets.load_dataset("Vipitis/Shadertoys")
295
  single_passes = passes_dataset.filter(lambda x: not x["has_inputs"] and x["num_passes"] == 1) #could also include shaders with no extra functions.
296
  all_single_passes = datasets.concatenate_datasets([single_passes["train"], single_passes["test"]])
 
331
  PIPE = None
332
 
333
  def _make_pipeline(model_cp = "Vipitis/santacoder-finetuned-Shadertoys-fine"): #bad default model for testing
334
+ # if torch.cuda.is_available():
335
+ # device = "cuda"
336
+ # else:
337
+ # device = "cpu"
338
  tokenizer = AutoTokenizer.from_pretrained(model_cp, trust_remote_code=True)
339
  model = AutoModelForCausalLM.from_pretrained(model_cp, trust_remote_code=True)
340
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, trust_remote_code=True) #, device=device)
341
  PIPE = pipe # set the global?
342
  print(f"loaded model {model_cp} as a pipline")
343
  return pipe
 
356
  variation = orig_code[:retn_start_idx] + generated + orig_code[retn_end_idx:]
357
  return variation
358
 
359
+ def alter_return(orig_code, func_idx, pipeline=PIPE): #default pipeline can't be passed as gloabl?
360
  """
361
  Replaces the return statement of a function with a generated one.
362
  Args:
 
370
  print("no pipeline found, loading default one")
371
  pipeline = _make_pipeline()
372
 
373
+ if isinstance(func_idx, str):
374
+ print(f"{func_idx=}")
375
+ func_idx = int(func_idx.split(":")[0].strip())
376
+ elif isinstance(func_idx, int):
377
+ pass
378
+ else:
379
+ raise gr.Error(f"func_idx must be int or str, not {type(func_idx)}")
380
 
381
  retrns = []
382
  retrn_start_idx = orig_code.find("return")
 
410
  return char_idx
411
 
412
 
413
+ def alter_body(old_code, func_id, funcs_list: list, pipeline=PIPE):
414
  """
415
  Replaces the body of a function with a generated one.
416
  Args:
 
420
  Returns:
421
  str: The altered code.
422
  """
423
+ if isinstance(func_id, str):
424
+ print(f"{func_id=}")
425
+ func_id = int(func_id.split(":")[0].strip()) #undo their string casting?
426
+ elif isinstance(func_id, int):
427
+ pass
428
+ else:
429
+ raise gr.Error(f"func_id must be int or str, not {type(func_id)}")
430
  func_node = funcs_list[func_id]
431
  print(f"using for generation: {func_node=}")
432
 
 
492
  top_md = gr.Markdown(intro_text)
493
  model_cp = gr.Textbox(value="Vipitis/santacoder-finetuned-Shadertoys-fine", label="Model Checkpoint (Enter to load!)", interactive=True)
494
  sample_idx = gr.Slider(minimum=0, maximum=num_samples, value=3211, label="pick sample from dataset", step=1.0)
495
+ func_dropdown = gr.Dropdown(value=["0: edit the Code (or load a shader) to update this dropdown"], label="chose a function to modify") #breaks if I add a string in before that? #TODO: use type="index" to get int - always gives None?
496
  with gr.Row():
497
  gen_return_button = gr.Button("generate a alternate return statement", label="generate return")
498
  gen_func_button = gr.Button("generate an alternate function body", label="generate function")
 
501
  with gr.Column():
502
  source_embed = gr.HTML('<iframe width="640" height="360" frameborder="0" src="" allowfullscreen></iframe>', label="How this shader originally renders")
503
  our_embed = gr.HTML(label="glsl render of the current code")
504
+ sample_code = gr.Code(new_shadertoy_code, label="Current Code (will update changes you generate)", language=None)
505
  bot_md = gr.Markdown(outro_text)
506
  sample_pass = gr.State(value={})
507
  pipe = gr.State(value=PIPE)
508
  pipe.value=_make_pipeline("Vipitis/santacoder-finetuned-Shadertoys-fine") # set a default like this?
509
  funcs = gr.State(value=[])
510
+ # funcs.value.append(list_dropdown(sample_code.value)[0]) #to circumvent the json issue?
511
  # hist_state = gr.State(Value={})
512
  # history_table = gr.JSON()
513