hunthinn committed
Commit 0975142
1 Parent(s): c7af220

load my model

Files changed (2)
  1. __pycache__/app.cpython-310.pyc +0 -0
  2. app.py +13 -9
__pycache__/app.cpython-310.pyc CHANGED
Binary files a/__pycache__/app.cpython-310.pyc and b/__pycache__/app.cpython-310.pyc differ
 
app.py CHANGED
@@ -1,19 +1,23 @@
- from io import BytesIO
+
  from flask import Flask, jsonify, request
  from transformers import GPT2Tokenizer, GPT2LMHeadModel

  # Load the fine-tuned model and tokenizer
- model_path = "gpt2"
- tokenizer = GPT2Tokenizer.from_pretrained(model_path)
+ tokenizer_path = "gpt2"
+ model_path = 'hunthinn/movie_title_gpt2'
+ tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_path)
  model = GPT2LMHeadModel.from_pretrained(model_path)
-
+ tokenizer.pad_token = tokenizer.eos_token

  def infer_title(input):
-     input_text = "Q: " + input + " A:"
-     input_ids = tokenizer.encode(input_text, return_tensors='pt')
-     output = model.generate(input_ids, max_length=50, num_return_sequences=1)
-     response = tokenizer.decode(output[0], skip_special_tokens=True)
-     return response
+     if input:
+         input_text = "Q: " + input + " A:"
+         input_ids = tokenizer.encode(input_text, return_tensors='pt')
+         output = model.generate(input_ids, max_length=50, num_return_sequences=1)
+         response = tokenizer.decode(output[0], skip_special_tokens=True)
+         response = response.split('A:')
+         return response[-1]
+

  app = Flask(__name__)
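
The hunk above stops at app = Flask(__name__), so the route that actually calls infer_title is not part of this diff. Below is a minimal, hypothetical sketch of how the updated inference path could be wired up and exercised; the /infer route, its JSON shape, and the port are assumptions for illustration, not code from this commit.

# Hypothetical sketch: one way the new inference path could be exposed.
# The /infer route and the JSON payload are assumptions, not part of this commit.
from flask import Flask, jsonify, request
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("hunthinn/movie_title_gpt2")
tokenizer.pad_token = tokenizer.eos_token

def infer_title(input):
    # Same logic as the committed version: prompt with "Q: ... A:" and keep the text after "A:".
    if input:
        input_text = "Q: " + input + " A:"
        input_ids = tokenizer.encode(input_text, return_tensors="pt")
        output = model.generate(input_ids, max_length=50, num_return_sequences=1)
        response = tokenizer.decode(output[0], skip_special_tokens=True)
        return response.split("A:")[-1]

app = Flask(__name__)

@app.route("/infer", methods=["POST"])  # hypothetical route name
def infer():
    data = request.get_json(force=True) or {}
    return jsonify({"title": infer_title(data.get("input", ""))})

# Example request (assuming the app listens on port 7860, the Spaces default):
#   curl -X POST http://localhost:7860/infer \
#        -H "Content-Type: application/json" \
#        -d '{"input": "a heist movie set on the moon"}'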