SpeedStar101 committed
Commit
df94fd5
1 Parent(s): 97c8648

Update README.md

Files changed (1):
  1. README.md (+2 -3)
README.md CHANGED
@@ -82,7 +82,6 @@ def generate_response(model, tokenizer, input_text, max_length=300, min_length=2
         num_beams=num_beams,
         no_repeat_ngram_size=2,
         do_sample=True,
-        repetition_penalty=repetition_penalty # Added repetition_penalty
     )
 
     # Decode the generated responses
@@ -100,7 +99,7 @@ def generate_response(model, tokenizer, input_text, max_length=300, min_length=2
 
 
 # Load pre-trained model and tokenizer
-access_token = "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"
+access_token = "REPLACE_WITH_ACCESS_TOKEN"
 model_id = "Starcodium/VergilGPT2"
 tokenizer = AutoTokenizer.from_pretrained(model_id, revision="main", use_auth_token=access_token)
 model = AutoModelForCausalLM.from_pretrained(model_id, revision="main", use_auth_token=access_token)
@@ -114,7 +113,7 @@ while True:
     if input_text.lower() in ["quit", "exit"]:
         break
 
-    responses = generate_response(model, tokenizer, input_text, max_length=100, min_length=20, num_return_sequences=1, temperature=0.7, top_k=40, top_p=0.5, num_beams=1, repetition_penalty=1.2)
+    responses = generate_response(model, tokenizer, input_text, max_length=100, min_length=20, num_return_sequences=1, temperature=0.7, top_k=40, top_p=0.5, num_beams=1)
     responses = [r for r in responses if r.strip() != '']
     if responses:
         response = responses[0]
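For readers applying this change locally: the new placeholder is meant to be filled at run time rather than replaced with another hardcoded string. Below is a minimal sketch of the updated loading step, assuming the token is exported in an environment variable named HF_TOKEN (the variable name is illustrative and not part of the original README):

```python
# Hypothetical usage sketch: read the access token from the environment instead of
# hardcoding it, then load the model and tokenizer exactly as the README does.
import os

from transformers import AutoModelForCausalLM, AutoTokenizer

# HF_TOKEN is an assumed variable name; export it with your own Hugging Face token.
access_token = os.environ.get("HF_TOKEN")

model_id = "Starcodium/VergilGPT2"
tokenizer = AutoTokenizer.from_pretrained(model_id, revision="main", use_auth_token=access_token)
model = AutoModelForCausalLM.from_pretrained(model_id, revision="main", use_auth_token=access_token)
```

The sketch keeps the README's `use_auth_token=` keyword for consistency with the diff; newer transformers releases also accept `token=` in its place.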