Neo111x committed on
Commit
9781fee
·
verified ·
1 Parent(s): f236558

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -54,9 +54,9 @@ before = f"# This is the assembly code:\n"#prompt
54
  after = "\n# What is the source code?\n"#prompt
55
  asm_func = before+asm_func.strip()+after
56
  tokenizer = AutoTokenizer.from_pretrained(model_path)
57
- model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype="auto", device_map="auto").to("cuda:0")
58
 
59
- inputs = tokenizer(asm_func, return_tensors="pt").to("cuda:0")
60
  with torch.no_grad():
61
  outputs = model.generate(**inputs, max_new_tokens=2048)### max length to 4096, max new tokens should be below the range
62
  c_func_decompile = tokenizer.decode(outputs[0][len(inputs[0]):-1])
 
54
  after = "\n# What is the source code?\n"#prompt
55
  asm_func = before+asm_func.strip()+after
56
  tokenizer = AutoTokenizer.from_pretrained(model_path)
57
+ model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype="auto", device_map="auto")
58
 
59
+ inputs = tokenizer(asm_func, return_tensors="pt")
60
  with torch.no_grad():
61
  outputs = model.generate(**inputs, max_new_tokens=2048)### max length to 4096, max new tokens should be below the range
62
  c_func_decompile = tokenizer.decode(outputs[0][len(inputs[0]):-1])